Dataset schema (one row per harvested source file):

| Column | Type | Range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3–616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class (Python) |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1–132 |
### /.history/Code/markov_chain_20200120225120.py (SamirIngley/CS1.2-Tweet-Gen)
*no license · 794 bytes · Python*

import sample
from clean_text import clean
from dictogram import Dictogram  # assumption: course-provided histogram module; Dictogram is used below but was never imported

class Markov():
def __init__(self, corpus):
self.corpus = corpus
self.states = {}
self.chain()
def chain(self):
last_word = None
for word in self.corpus:
            if last_word is not None: # every word after the first has a predecessor
if last_word not in self.states: # if we haven't seen this word before
self.states[last_word] = Dictogram() # empty histogram as value
                self.states[last_word].add_count(word) # count the transition last_word -> word (was self[last_word], a bug)
last_word = word # set word as last_word
def __str__(self):
return str(self.states)
if __name__ == '__main__':
    source = 'one fish two fish red fish blue fish'
    print(Markov(source.split()))  # was print(markov(source)): wrong case, and the corpus must be a list of words
"[email protected]"
] | |
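The class above leans on a `Dictogram` histogram from the course scaffolding. A minimal stand-in, assuming only that it counts occurrences via `add_count`, could look like:

```python
from collections import Counter

class Dictogram(Counter):
    """Minimal histogram stand-in: maps a word to its occurrence count."""
    def add_count(self, word, count=1):
        self[word] += count
```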
### /core/confdb/syntax/protocols/lldp/hints.py (nocproject/noc)
*BSD-3-Clause · 839 bytes · Python*

# ----------------------------------------------------------------------
# ConfDB hints protocols lldp syntax
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# NOC modules
from ...defs import DEF
from ...patterns import BOOL, IF_NAME
HINTS_PROTOCOLS_LLDP = DEF(
"lldp",
[
DEF("status", [DEF(BOOL, name="status", required=True, gen="make_global_lldp_status")]),
DEF(
"interface",
[
DEF(
IF_NAME,
[DEF("off", gen="make_lldp_interface_disable")],
multi=True,
name="interface",
)
],
),
],
)
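Read as a tree, the `DEF` nesting above admits two kinds of normalized-config paths, roughly (an informal illustration inferred from the file's location under `core/confdb/syntax/protocols/lldp/`, not NOC's exact tokenization):

```
hints protocols lldp status <BOOL>
hints protocols lldp interface <IF_NAME> off
```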
### /interprocedural_analyses/taint/test/integration/functions_as_locals.py (luizribeiro/pyre-check)
*MIT · 675 bytes · Python*

# flake8: noqa
from builtins import __test_sink, __test_source
def foo(arg):
__test_sink(arg)
def foo_as_local():
x = __test_source()
f = foo
foo(x)
f(x)
def local_tito(arg):
f = foo
f(arg)
class C:
def m(self, arg):
__test_sink(arg)
def local_function_with_method_sink(c: C):
f = c.m
x = __test_source()
c.m(x)
f(x)
def method_tito(c: C, arg):
f = c.m
f(arg)
def barA(arg1: str, arg2: str):
__test_sink(arg1)
def barB(arg1: str, arg2: int):
__test_sink(arg2)
def a_or_b():
if 1 > 2:
f = barA
else:
f = barB
f(__test_source(), 0)
f(0, __test_source())
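The `a_or_b` fixture is the interesting one: `f` may be bound to either `barA` or `barB`, and the two calls place tainted data in different argument positions, so a taint analysis has to pair each possible callee with its own sink position. A toy stand-alone illustration of that pairing (not Pysa's actual machinery):

```python
# Which argument index each possible callee treats as a sink.
sinks = {"barA": 0, "barB": 1}          # barA sinks arg1, barB sinks arg2
calls = [("TAINT", 0), (0, "TAINT")]    # f(__test_source(), 0) and f(0, __test_source())

for args in calls:
    for fn, idx in sinks.items():
        if args[idx] == "TAINT":
            print("tainted flow into", fn, "argument", idx + 1)
```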
### /dQZmkrPaKdtSat5f9_6.py (daniel-reich/ubiquitous-fiesta)
*no license · 312 bytes · Python*
def single_occurrence(txt):
    """Return the first character (upper-cased) that occurs exactly once in txt, or ""."""
    txt = txt.upper()
    for ch in txt:
        if txt.count(ch) == 1:
            return ch
    return ""
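Example behaviour:

```python
assert single_occurrence("aabbcddd") == "C"  # 'c' is the only character seen once (result is upper-cased)
assert single_occurrence("aabb") == ""       # no unique character
```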
### /aliyun-python-sdk-ecs/aliyunsdkecs/request/v20140526/DeleteForwardEntryRequest.py (crazygit/aliyun-openapi-python-sdk)
*Apache-2.0 · 2,138 bytes · Python*

# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DeleteForwardEntryRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DeleteForwardEntry')
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_ForwardTableId(self):
return self.get_query_params().get('ForwardTableId')
def set_ForwardTableId(self,ForwardTableId):
self.add_query_param('ForwardTableId',ForwardTableId)
def get_ForwardEntryId(self):
return self.get_query_params().get('ForwardEntryId')
def set_ForwardEntryId(self,ForwardEntryId):
self.add_query_param('ForwardEntryId',ForwardEntryId)
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
		self.add_query_param('OwnerId',OwnerId)
"[email protected]"
] | |
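A rough usage sketch with the SDK's standard client; region, credentials, and IDs below are placeholders, not values from this repository:

```python
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = DeleteForwardEntryRequest()
request.set_ForwardTableId('ftb-xxxxxxxx')
request.set_ForwardEntryId('fwd-xxxxxxxx')
response = client.do_action_with_exception(request)  # raw API response body
print(response)
```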
### /devel/lib/python2.7/dist-packages/geographic_msgs/msg/_GeoPoseStamped.py (wndxwilson/Azimorph)
*no license · 8,489 bytes · Python*

# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from geographic_msgs/GeoPoseStamped.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geographic_msgs.msg
import geometry_msgs.msg
import std_msgs.msg
class GeoPoseStamped(genpy.Message):
_md5sum = "cc409c8ed6064d8a846fa207bf3fba6b"
_type = "geographic_msgs/GeoPoseStamped"
_has_header = True # flag to mark the presence of a Header object
_full_text = """Header header
geographic_msgs/GeoPose pose
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
================================================================================
MSG: geographic_msgs/GeoPose
# Geographic pose, using the WGS 84 reference ellipsoid.
#
# Orientation uses the East-North-Up (ENU) frame of reference.
# (But, what about singularities at the poles?)
GeoPoint position
geometry_msgs/Quaternion orientation
================================================================================
MSG: geographic_msgs/GeoPoint
# Geographic point, using the WGS 84 reference ellipsoid.
# Latitude [degrees]. Positive is north of equator; negative is south
# (-90 <= latitude <= +90).
float64 latitude
# Longitude [degrees]. Positive is east of prime meridian; negative is
# west (-180 <= longitude <= +180). At the poles, latitude is -90 or
# +90, and longitude is irrelevant, but must be in range.
float64 longitude
# Altitude [m]. Positive is above the WGS 84 ellipsoid (NaN if unspecified).
float64 altitude
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['header','pose']
_slot_types = ['std_msgs/Header','geographic_msgs/GeoPose']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,pose
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(GeoPoseStamped, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geographic_msgs.msg.GeoPose()
else:
self.header = std_msgs.msg.Header()
self.pose = geographic_msgs.msg.GeoPose()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self
buff.write(_get_struct_7d().pack(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.pose is None:
self.pose = geographic_msgs.msg.GeoPose()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 56
(_x.pose.position.latitude, _x.pose.position.longitude, _x.pose.position.altitude, _x.pose.orientation.x, _x.pose.orientation.y, _x.pose.orientation.z, _x.pose.orientation.w,) = _get_struct_7d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
_struct_7d = None
def _get_struct_7d():
global _struct_7d
if _struct_7d is None:
_struct_7d = struct.Struct("<7d")
return _struct_7d
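A minimal serialize/deserialize round trip, assuming a ROS 1 environment where this generated module imports cleanly:

```python
from io import BytesIO

msg = GeoPoseStamped()
msg.header.frame_id = 'map'
msg.pose.position.latitude = 38.91
msg.pose.position.longitude = 16.59
msg.pose.orientation.w = 1.0

buf = BytesIO()
msg.serialize(buf)  # pack into the ROS wire format

decoded = GeoPoseStamped().deserialize(buf.getvalue())
assert decoded.pose.position.latitude == msg.pose.position.latitude
```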
### /sportsdata/mlb_projections/models/mlb_projections_dfs_slate_game.py (scottypate/sportsdata)
*no license · 7,117 bytes · Python*

# coding: utf-8
"""
MLB v3 Projections
MLB projections API. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MlbProjectionsDfsSlateGame(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'slate_game_id': 'int',
'slate_id': 'int',
'game_id': 'int',
'game': 'MlbProjectionsGame',
'operator_game_id': 'int',
'removed_by_operator': 'bool'
}
attribute_map = {
'slate_game_id': 'SlateGameID',
'slate_id': 'SlateID',
'game_id': 'GameID',
'game': 'Game',
'operator_game_id': 'OperatorGameID',
'removed_by_operator': 'RemovedByOperator'
}
def __init__(self, slate_game_id=None, slate_id=None, game_id=None, game=None, operator_game_id=None, removed_by_operator=None): # noqa: E501
"""MlbProjectionsDfsSlateGame - a model defined in Swagger""" # noqa: E501
self._slate_game_id = None
self._slate_id = None
self._game_id = None
self._game = None
self._operator_game_id = None
self._removed_by_operator = None
self.discriminator = None
if slate_game_id is not None:
self.slate_game_id = slate_game_id
if slate_id is not None:
self.slate_id = slate_id
if game_id is not None:
self.game_id = game_id
if game is not None:
self.game = game
if operator_game_id is not None:
self.operator_game_id = operator_game_id
if removed_by_operator is not None:
self.removed_by_operator = removed_by_operator
@property
def slate_game_id(self):
"""Gets the slate_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The slate_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._slate_game_id
@slate_game_id.setter
def slate_game_id(self, slate_game_id):
"""Sets the slate_game_id of this MlbProjectionsDfsSlateGame.
:param slate_game_id: The slate_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._slate_game_id = slate_game_id
@property
def slate_id(self):
"""Gets the slate_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The slate_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._slate_id
@slate_id.setter
def slate_id(self, slate_id):
"""Sets the slate_id of this MlbProjectionsDfsSlateGame.
:param slate_id: The slate_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._slate_id = slate_id
@property
def game_id(self):
"""Gets the game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._game_id
@game_id.setter
def game_id(self, game_id):
"""Sets the game_id of this MlbProjectionsDfsSlateGame.
:param game_id: The game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._game_id = game_id
@property
def game(self):
"""Gets the game of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The game of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: MlbProjectionsGame
"""
return self._game
@game.setter
def game(self, game):
"""Sets the game of this MlbProjectionsDfsSlateGame.
:param game: The game of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: MlbProjectionsGame
"""
self._game = game
@property
def operator_game_id(self):
"""Gets the operator_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The operator_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: int
"""
return self._operator_game_id
@operator_game_id.setter
def operator_game_id(self, operator_game_id):
"""Sets the operator_game_id of this MlbProjectionsDfsSlateGame.
:param operator_game_id: The operator_game_id of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: int
"""
self._operator_game_id = operator_game_id
@property
def removed_by_operator(self):
"""Gets the removed_by_operator of this MlbProjectionsDfsSlateGame. # noqa: E501
:return: The removed_by_operator of this MlbProjectionsDfsSlateGame. # noqa: E501
:rtype: bool
"""
return self._removed_by_operator
@removed_by_operator.setter
def removed_by_operator(self, removed_by_operator):
"""Sets the removed_by_operator of this MlbProjectionsDfsSlateGame.
:param removed_by_operator: The removed_by_operator of this MlbProjectionsDfsSlateGame. # noqa: E501
:type: bool
"""
self._removed_by_operator = removed_by_operator
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MlbProjectionsDfsSlateGame, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MlbProjectionsDfsSlateGame):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
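Typical handling of such a swagger-generated model (field values invented for illustration):

```python
game = MlbProjectionsDfsSlateGame(slate_game_id=1, slate_id=10, game_id=555)
game.removed_by_operator = False

print(game.to_dict())  # plain dict, ready for JSON serialization
# Equality compares all fields, so this is False (removed_by_operator differs):
print(game == MlbProjectionsDfsSlateGame(slate_game_id=1, slate_id=10, game_id=555))
```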
### /examples/data/Assignment_7/snxkai001/util.py (MrHamdulay/csc3-capstone)
*no license · 2,079 bytes · Python*

def create_grid(grid):
for u in range(4):
grid.append([])
for down in range(4):
grid[u].append(0)
def print_grid(grid):
print("+" + "-"*20 + "+")
allign= "{0:" "<5}"
for row in range(4):
print("|", end="")
for col in range(4):
if grid[row][col] != 0:
print(allign.format(grid[row][col]), end="")
else:
print(allign.format(" "), end= "")
print("|")
print("+" + "-"*20 + "+")
def check_lost(grid):
for kol in range(4):
for lef in range(4):
if grid[kol][lef]==0:
return False
else:
continue
for n in range(4):
for m in range(3):
if grid[m][n]==grid[m+1][n]:
return False
else:
continue
for i in range(4):
for j in range(3):
if grid[i][j]==grid[i][j+1]:
return False
else:
continue
return True
def check_won(grid):
for i in range(4):
for p in range(4):
if grid[i][p]>=32:
return True
else:
continue
return False
def grid_equal(grid1, grid2):
for i in range(4):
for j in range(4):
if grid1[i][j]==grid2[i][j]:
continue
else:
return False
return True
def copy_grid(grid):
list1=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
for col in range(4):
for row in range(4):
list1[col][row]=grid[col][row]
return list1
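A quick smoke test of the helpers (a 2048-style board; note that `check_won` treats 32 as the winning tile):

```python
grid = []
create_grid(grid)              # grid is now a 4x4 of zeros
grid[0][0], grid[0][1] = 2, 2
print_grid(grid)
print(check_lost(grid))        # False: empty cells (and a 2|2 merge) remain
print(check_won(grid))         # False: no tile has reached 32
print(grid_equal(grid, copy_grid(grid)))  # True
```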
### /data_structures_domain/linked_lists/print_in_reverse.py (davidozhang/hackerrank)
*no license · 529 bytes · Python*

"""
Print elements of a linked list in reverse order as standard output
head could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def ReversePrint(head):
if not head:
return
ReversePrint(head.next)
    print(head.data)
'''
Cleaner implementation
October 1, 2016
'''
def ReversePrint(head):
if head is not None:
ReversePrint(head.next)
        print(head.data)
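Both versions recurse once per node, so a long list can exhaust Python's default recursion limit (about 1000 frames). An equivalent iterative sketch:

```python
def reverse_print_iterative(head):
    items = []
    while head is not None:
        items.append(head.data)
        head = head.next
    for data in reversed(items):
        print(data)
```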
### /env/Lib/site-packages/werkzeug/debug/__init__.py (Lisukod/planet-tracker)
*no license · 17,561 bytes · Python*

# -*- coding: utf-8 -*-
"""
werkzeug.debug
~~~~~~~~~~~~~~
WSGI application traceback debugger.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import getpass
import hashlib
import json
import mimetypes
import os
import pkgutil
import re
import sys
import time
import uuid
from itertools import chain
from os.path import basename
from os.path import join
from .._compat import text_type
from .._internal import _log
from ..http import parse_cookie
from ..security import gen_salt
from ..wrappers import BaseRequest as Request
from ..wrappers import BaseResponse as Response
from .console import Console
from .tbtools import get_current_traceback
from .tbtools import render_console_html
# A week
PIN_TIME = 60 * 60 * 24 * 7
def hash_pin(pin):
if isinstance(pin, text_type):
pin = pin.encode("utf-8", "replace")
return hashlib.md5(pin + b"shittysalt").hexdigest()[:12]
_machine_id = None
def get_machine_id():
global _machine_id
if _machine_id is not None:
return _machine_id
def _generate():
linux = b""
# machine-id is stable across boots, boot_id is not.
for filename in "/etc/machine-id", "/proc/sys/kernel/random/boot_id":
try:
with open(filename, "rb") as f:
value = f.readline().strip()
except IOError:
continue
if value:
linux += value
break
# Containers share the same machine id, add some cgroup
# information. This is used outside containers too but should be
# relatively stable across boots.
try:
with open("/proc/self/cgroup", "rb") as f:
linux += f.readline().strip().rpartition(b"/")[2]
except IOError:
pass
if linux:
return linux
# On OS X, use ioreg to get the computer's serial number.
try:
# subprocess may not be available, e.g. Google App Engine
# https://github.com/pallets/werkzeug/issues/925
from subprocess import Popen, PIPE
dump = Popen(
["ioreg", "-c", "IOPlatformExpertDevice", "-d", "2"],
stdout=PIPE,
).communicate()[0]
match = re.search(b'"serial-number" = <([^>]+)', dump)
if match is not None:
return match.group(1)
except (OSError, ImportError):
pass
# On Windows, use winreg to get the machine guid.
try:
import winreg as wr
except ImportError:
try:
import _winreg as wr
except ImportError:
wr = None
if wr is not None:
try:
with wr.OpenKey(
wr.HKEY_LOCAL_MACHINE,
"SOFTWARE\\Microsoft\\Cryptography",
0,
wr.KEY_READ | wr.KEY_WOW64_64KEY,
) as rk:
guid, guid_type = wr.QueryValueEx(rk, "MachineGuid")
if guid_type == wr.REG_SZ:
return guid.encode("utf-8")
return guid
except WindowsError:
pass
_machine_id = _generate()
return _machine_id
class _ConsoleFrame(object):
"""Helper class so that we can reuse the frame console code for the
standalone console.
"""
def __init__(self, namespace):
self.console = Console(namespace)
self.id = 0
def get_pin_and_cookie_name(app):
"""Given an application object this returns a semi-stable 9 digit pin
code and a random key. The hope is that this is stable between
restarts to not make debugging particularly frustrating. If the pin
was forcefully disabled this returns `None`.
Second item in the resulting tuple is the cookie name for remembering.
"""
pin = os.environ.get("WERKZEUG_DEBUG_PIN")
rv = None
num = None
# Pin was explicitly disabled
if pin == "off":
return None, None
# Pin was provided explicitly
if pin is not None and pin.replace("-", "").isdigit():
# If there are separators in the pin, return it directly
if "-" in pin:
rv = pin
else:
num = pin
modname = getattr(app, "__module__", app.__class__.__module__)
try:
# getuser imports the pwd module, which does not exist in Google
# App Engine. It may also raise a KeyError if the UID does not
# have a username, such as in Docker.
username = getpass.getuser()
except (ImportError, KeyError):
username = None
mod = sys.modules.get(modname)
# This information only exists to make the cookie unique on the
# computer, not as a security feature.
probably_public_bits = [
username,
modname,
getattr(app, "__name__", app.__class__.__name__),
getattr(mod, "__file__", None),
]
# This information is here to make it harder for an attacker to
# guess the cookie name. They are unlikely to be contained anywhere
# within the unauthenticated debug page.
private_bits = [str(uuid.getnode()), get_machine_id()]
h = hashlib.md5()
for bit in chain(probably_public_bits, private_bits):
if not bit:
continue
if isinstance(bit, text_type):
bit = bit.encode("utf-8")
h.update(bit)
h.update(b"cookiesalt")
cookie_name = "__wzd" + h.hexdigest()[:20]
# If we need to generate a pin we salt it a bit more so that we don't
# end up with the same value and generate out 9 digits
if num is None:
h.update(b"pinsalt")
num = ("%09d" % int(h.hexdigest(), 16))[:9]
# Format the pincode in groups of digits for easier remembering if
# we don't have a result yet.
if rv is None:
for group_size in 5, 4, 3:
if len(num) % group_size == 0:
rv = "-".join(
num[x : x + group_size].rjust(group_size, "0")
for x in range(0, len(num), group_size)
)
break
else:
rv = num
return rv, cookie_name
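# Illustration of the grouping above: a 9-digit pin uses the first group_size
# in (5, 4, 3) that divides 9 evenly, i.e. 3, yielding e.g. "123-456-789";
# the for/else falls back to the ungrouped digits otherwise.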
class DebuggedApplication(object):
"""Enables debugging support for a given application::
from werkzeug.debug import DebuggedApplication
from myapp import app
app = DebuggedApplication(app, evalex=True)
The `evalex` keyword argument allows evaluating expressions in a
traceback's frame context.
:param app: the WSGI application to run debugged.
:param evalex: enable exception evaluation feature (interactive
debugging). This requires a non-forking server.
:param request_key: The key that points to the request object in ths
environment. This parameter is ignored in current
versions.
:param console_path: the URL for a general purpose console.
:param console_init_func: the function that is executed before starting
the general purpose console. The return value
is used as initial namespace.
:param show_hidden_frames: by default hidden traceback frames are skipped.
You can show them by setting this parameter
to `True`.
:param pin_security: can be used to disable the pin based security system.
:param pin_logging: enables the logging of the pin system.
"""
def __init__(
self,
app,
evalex=False,
request_key="werkzeug.request",
console_path="/console",
console_init_func=None,
show_hidden_frames=False,
pin_security=True,
pin_logging=True,
):
if not console_init_func:
console_init_func = None
self.app = app
self.evalex = evalex
self.frames = {}
self.tracebacks = {}
self.request_key = request_key
self.console_path = console_path
self.console_init_func = console_init_func
self.show_hidden_frames = show_hidden_frames
self.secret = gen_salt(20)
self._failed_pin_auth = 0
self.pin_logging = pin_logging
if pin_security:
# Print out the pin for the debugger on standard out.
if os.environ.get("WERKZEUG_RUN_MAIN") == "true" and pin_logging:
_log("warning", " * Debugger is active!")
if self.pin is None:
_log(
"warning",
" * Debugger PIN disabled. DEBUGGER UNSECURED!",
)
else:
_log("info", " * Debugger PIN: %s" % self.pin)
else:
self.pin = None
@property
def pin(self):
if not hasattr(self, "_pin"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin
@pin.setter
def pin(self, value):
self._pin = value
@property
def pin_cookie_name(self):
"""The name of the pin cookie."""
if not hasattr(self, "_pin_cookie"):
self._pin, self._pin_cookie = get_pin_and_cookie_name(self.app)
return self._pin_cookie
def debug_application(self, environ, start_response):
"""Run the application and conserve the traceback frames."""
app_iter = None
try:
app_iter = self.app(environ, start_response)
for item in app_iter:
yield item
if hasattr(app_iter, "close"):
app_iter.close()
except Exception:
if hasattr(app_iter, "close"):
app_iter.close()
traceback = get_current_traceback(
skip=1,
show_hidden_frames=self.show_hidden_frames,
ignore_system_exceptions=True,
)
for frame in traceback.frames:
self.frames[frame.id] = frame
self.tracebacks[traceback.id] = traceback
try:
start_response(
"500 INTERNAL SERVER ERROR",
[
("Content-Type", "text/html; charset=utf-8"),
# Disable Chrome's XSS protection, the debug
# output can cause false-positives.
("X-XSS-Protection", "0"),
],
)
except Exception:
# if we end up here there has been output but an error
# occurred. in that situation we can do nothing fancy any
# more, better log something into the error log and fall
# back gracefully.
environ["wsgi.errors"].write(
"Debugging middleware caught exception in streamed "
"response at a point where response headers were already "
"sent.\n"
)
else:
is_trusted = bool(self.check_pin_trust(environ))
yield traceback.render_full(
evalex=self.evalex,
evalex_trusted=is_trusted,
secret=self.secret,
).encode("utf-8", "replace")
traceback.log(environ["wsgi.errors"])
def execute_command(self, request, command, frame):
"""Execute a command in a console."""
return Response(frame.console.eval(command), mimetype="text/html")
def display_console(self, request):
"""Display a standalone shell."""
if 0 not in self.frames:
if self.console_init_func is None:
ns = {}
else:
ns = dict(self.console_init_func())
ns.setdefault("app", self.app)
self.frames[0] = _ConsoleFrame(ns)
is_trusted = bool(self.check_pin_trust(request.environ))
return Response(
render_console_html(secret=self.secret, evalex_trusted=is_trusted),
mimetype="text/html",
)
def paste_traceback(self, request, traceback):
"""Paste the traceback and return a JSON response."""
rv = traceback.paste()
return Response(json.dumps(rv), mimetype="application/json")
def get_resource(self, request, filename):
"""Return a static resource from the shared folder."""
filename = join("shared", basename(filename))
try:
data = pkgutil.get_data(__package__, filename)
except OSError:
data = None
if data is not None:
mimetype = (
mimetypes.guess_type(filename)[0] or "application/octet-stream"
)
return Response(data, mimetype=mimetype)
return Response("Not Found", status=404)
def check_pin_trust(self, environ):
"""Checks if the request passed the pin test. This returns `True` if the
request is trusted on a pin/cookie basis and returns `False` if not.
Additionally if the cookie's stored pin hash is wrong it will return
`None` so that appropriate action can be taken.
"""
if self.pin is None:
return True
val = parse_cookie(environ).get(self.pin_cookie_name)
if not val or "|" not in val:
return False
ts, pin_hash = val.split("|", 1)
if not ts.isdigit():
return False
if pin_hash != hash_pin(self.pin):
return None
return (time.time() - PIN_TIME) < int(ts)
def _fail_pin_auth(self):
time.sleep(5.0 if self._failed_pin_auth > 5 else 0.5)
self._failed_pin_auth += 1
def pin_auth(self, request):
"""Authenticates with the pin."""
exhausted = False
auth = False
trust = self.check_pin_trust(request.environ)
# If the trust return value is `None` it means that the cookie is
# set but the stored pin hash value is bad. This means that the
# pin was changed. In this case we count a bad auth and unset the
# cookie. This way it becomes harder to guess the cookie name
# instead of the pin as we still count up failures.
bad_cookie = False
if trust is None:
self._fail_pin_auth()
bad_cookie = True
# If we're trusted, we're authenticated.
elif trust:
auth = True
# If we failed too many times, then we're locked out.
elif self._failed_pin_auth > 10:
exhausted = True
# Otherwise go through pin based authentication
else:
entered_pin = request.args.get("pin")
if entered_pin.strip().replace("-", "") == self.pin.replace(
"-", ""
):
self._failed_pin_auth = 0
auth = True
else:
self._fail_pin_auth()
rv = Response(
json.dumps({"auth": auth, "exhausted": exhausted}),
mimetype="application/json",
)
if auth:
rv.set_cookie(
self.pin_cookie_name,
"%s|%s" % (int(time.time()), hash_pin(self.pin)),
httponly=True,
)
elif bad_cookie:
rv.delete_cookie(self.pin_cookie_name)
return rv
def log_pin_request(self):
"""Log the pin if needed."""
if self.pin_logging and self.pin is not None:
_log(
"info",
" * To enable the debugger you need to enter the security pin:",
)
_log("info", " * Debugger pin code: %s" % self.pin)
return Response("")
def __call__(self, environ, start_response):
"""Dispatch the requests."""
# important: don't ever access a function here that reads the incoming
# form data! Otherwise the application won't have access to that data
# any more!
request = Request(environ)
response = self.debug_application
if request.args.get("__debugger__") == "yes":
cmd = request.args.get("cmd")
arg = request.args.get("f")
secret = request.args.get("s")
traceback = self.tracebacks.get(request.args.get("tb", type=int))
frame = self.frames.get(request.args.get("frm", type=int))
if cmd == "resource" and arg:
response = self.get_resource(request, arg)
elif (
cmd == "paste"
and traceback is not None
and secret == self.secret
):
response = self.paste_traceback(request, traceback)
elif cmd == "pinauth" and secret == self.secret:
response = self.pin_auth(request)
elif cmd == "printpin" and secret == self.secret:
response = self.log_pin_request()
elif (
self.evalex
and cmd is not None
and frame is not None
and self.secret == secret
and self.check_pin_trust(environ)
):
response = self.execute_command(request, cmd, frame)
elif (
self.evalex
and self.console_path is not None
and request.path == self.console_path
):
response = self.display_console(request)
return response(environ, start_response)
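For completeness, a runnable sketch of the wrapping pattern shown in the `DebuggedApplication` docstring, served with werkzeug's development server:

```python
from werkzeug.debug import DebuggedApplication
from werkzeug.serving import run_simple

def app(environ, start_response):
    raise RuntimeError('boom')  # any unhandled error renders the debugger page

if __name__ == '__main__':
    run_simple('localhost', 5000, DebuggedApplication(app, evalex=True))
```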
### /maptool.py (kailIII/mgrs-tools)
*no license · 955 bytes · Python*

import mgrs
from qgis.core import *
from qgis.gui import *
from qgis.utils import iface
from PyQt4.QtCore import *
class MGRSMapTool(QgsMapTool):
ct = mgrs.MGRS()
epsg4326 = QgsCoordinateReferenceSystem("EPSG:4326")
def __init__(self, canvas):
QgsMapTool.__init__(self, canvas)
self.setCursor(Qt.CrossCursor)
def canvasMoveEvent(self, e):
pt = self.toMapCoordinates(e.pos())
canvas = iface.mapCanvas()
canvasCrs = canvas.mapRenderer().destinationCrs()
transform = QgsCoordinateTransform(canvasCrs, self.epsg4326)
pt4326 = transform.transform(pt.x(), pt.y())
try:
mgrsCoords = self.ct.toMGRS(pt4326.y(), pt4326.x())
iface.mainWindow().statusBar().showMessage("MGRS Coordinate: " + mgrsCoords)
        except Exception:  # point falls outside the MGRS domain; clear the status bar
iface.mainWindow().statusBar().showMessage("")
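Activation from the QGIS Python console would look roughly like this (QGIS 2.x API, consistent with the `mapRenderer()` call above):

```python
canvas = iface.mapCanvas()
tool = MGRSMapTool(canvas)
canvas.setMapTool(tool)  # the status bar now tracks the cursor's MGRS coordinate
```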
### /uni_ticket/urls.py (mspasiano/uniTicket)
*Apache-2.0 · 15,976 bytes · Python*

from django.contrib import admin
from django.contrib.auth import views as auth_views
from django.urls import include, path, re_path
from django.utils.text import slugify
from django.views.generic import RedirectView
from . decorators import is_manager, is_operator, is_the_owner
from . settings import MANAGEMENT_URL_PREFIX
from . views import (datatables, generic, management,
manager, operator, user)
app_name="uni_ticket"
_dashboard_name = 'dashboard'
# System/Generic URLs
ticket = 'ticket/<str:ticket_id>'
urlpatterns = [
path('', RedirectView.as_view(url='/{}/'.format(_dashboard_name))),
# Router url di responsabilità su struttura (manager/operator/user)
re_path(r'^manage/(?:(?P<structure_slug>[-\w]+))?$', generic.manage, name='manage'),
# Attachments download
path('{}/download/attachment/<str:attachment>/'.format(ticket), generic.download_attachment, name='download_attachment'),
path('{}/reply/<str:reply_id>/download/attachment/'.format(ticket), generic.download_message_attachment, name='download_message_attachment'),
path('{}/task/<str:task_id>/download/attachment/'.format(ticket), generic.download_task_attachment, name='download_task_attachment'),
# Delete ticket message
path('messages/delete/<str:ticket_message_id>/', generic.ticket_message_delete, name='message_delete'),
path('email-notify/update/', generic.email_notify_change, name='email_notify_change'),
path('print/ticket/<str:ticket_id>/', generic.ticket_detail_print, name='ticket_detail_print'),
]
# Datatables URLs
structure = '<str:structure_slug>'
urlpatterns += [
# User json
path('user_all_tickets.json', datatables.user_all_tickets, name='user_all_tickets_json'),
path('user_opened_ticket.json', datatables.user_opened_ticket, name='user_opened_ticket_json'),
path('user_closed_ticket.json', datatables.user_closed_ticket, name='user_closed_ticket_json'),
path('user_unassigned_ticket.json', datatables.user_unassigned_ticket, name='user_unassigned_ticket_json'),
# Manager json
path('{}/manager_unassigned_ticket.json'.format(structure), datatables.manager_unassigned_ticket, name='manager_unassigned_ticket_json'),
path('{}/manager_opened_ticket.json'.format(structure), datatables.manager_opened_ticket, name='manager_opened_ticket_json'),
path('{}/manager_closed_ticket.json'.format(structure), datatables.manager_closed_ticket, name='manager_closed_ticket_json'),
path('{}/manager_not_closed_ticket.json'.format(structure), datatables.manager_not_closed_ticket, name='manager_not_closed_ticket_json'),
# Operator json
path('{}/operator_unassigned_ticket.json'.format(structure), datatables.operator_unassigned_ticket, name='operator_unassigned_ticket_json'),
path('{}/operator_opened_ticket.json'.format(structure), datatables.operator_opened_ticket, name='operator_opened_ticket_json'),
path('{}/operator_closed_ticket.json'.format(structure), datatables.operator_closed_ticket, name='operator_closed_ticket_json'),
path('{}/operator_not_closed_ticket.json'.format(structure), datatables.operator_not_closed_ticket, name='operator_not_closed_ticket_json'),
]
# Management URLs (manager and operator)
base = 'manage/<str:structure_slug>'
tickets = '{}/tickets'.format(base)
ticket = '{}/ticket'.format(tickets)
ticket_id = '{}/<str:ticket_id>'.format(ticket)
task = '{}/task'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)
urlpatterns += [
# Ticket
path('{}/opened/'.format(tickets), management.manage_opened_ticket_url, name='manage_opened_ticket_url'),
path('{}/unassigned/'.format(tickets), management.manage_unassigned_ticket_url, name='manage_unassigned_ticket_url'),
path('{}/closed/'.format(tickets), management.manage_closed_ticket_url, name='manage_closed_ticket_url'),
path('{}/'.format(tickets), management.manage_not_closed_ticket_url, name='manage_not_closed_ticket_url'),
path('{}/'.format(ticket), management.manage_ticket_url, name='manage_ticket_url'),
path('{}/'.format(ticket_id), management.manage_ticket_url_detail, name='manage_ticket_url_detail'),
path('{}/messages/'.format(ticket_id), management.ticket_message_url, name='manage_ticket_message_url'),
path('{}/competence/add/'.format(ticket_id), management.ticket_competence_add_url, name='add_ticket_competence_url'),
path('{}/dependence/add/'.format(ticket_id), management.ticket_dependence_add_url, name='add_ticket_dependence_url'),
path('{}/dependence/remove/<str:master_ticket_id>/'.format(ticket_id), management.ticket_dependence_remove, name='remove_ticket_dependence'),
path('{}/take/'.format(ticket_id), management.ticket_take, name='prendi_ticket_in_carico'),
path('{}/close/'.format(ticket_id), management.ticket_close_url, name='close_ticket'),
path('{}/reopen/'.format(ticket_id), management.ticket_reopen, name='reopen_ticket'),
# Task
path('{}/add/'.format(task), management.task_add_new_url, name='add_ticket_task_url'),
path('{}/'.format(task_id), management.task_detail_url, name='manage_task_detail_url'),
path('{}/close/'.format(task_id), management.task_close_url, name='close_task'),
path('{}/delete/'.format(task_id), management.task_remove, name='task_remove'),
path('{}/riapri/'.format(task_id), management.task_reopen, name='reopen_task'),
path('{}/edit/remove-attachment/'.format(task_id), management.task_attachment_delete, name='manage_elimina_allegato_task'),
path('{}/edit/'.format(task_id), management.task_edit_url, name='edit_task'),
]
# Manager URLs
base = '{}/<str:structure_slug>'.format(slugify(MANAGEMENT_URL_PREFIX['manager']))
tickets = '{}/tickets'.format(base)
ticket_id = '{}/ticket/<str:ticket_id>'.format(tickets)
task = '{}/activities'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)
offices = '{}/offices'.format(base)
office = '{}/office'.format(offices)
office_id = '{}/<str:office_slug>'.format(office)
categories = '{}/categories'.format(base)
category = '{}/category'.format(categories)
category_id = '{}/<str:category_slug>'.format(category)
cat_input = '{}/input'.format(category_id)
cat_input_id = '{}/<int:module_id>'.format(cat_input)
condition = '{}/conditions/condition'.format(category_id)
condition_id = '{}/<int:condition_id>'.format(condition)
urlpatterns += [
path('{}/{}/'.format(base, _dashboard_name), manager.dashboard, name='manager_dashboard'),
# Ticket
path('{}/opened/'.format(tickets), is_manager(generic.opened_ticket), name='manager_opened_ticket'),
path('{}/unassigned/'.format(tickets), is_manager(generic.unassigned_ticket), name='manager_unassigned_ticket'),
path('{}/closed/'.format(tickets), is_manager(generic.closed_ticket), name='manager_closed_ticket'),
path('{}/'.format(tickets), is_manager(management.tickets), name='manager_tickets'),
path('{}/'.format(ticket_id), is_manager(management.ticket_detail), name='manager_manage_ticket'),
path('{}/messages/'.format(ticket_id), is_manager(management.ticket_message), name='manager_ticket_message'),
path('{}/competence/add/'.format(ticket_id), is_manager(management.ticket_competence_add_new), name='manager_add_ticket_competence'),
path('{}/competence/add/<str:str_slug>/'.format(ticket_id), is_manager(management.ticket_competence_add_final), name='manager_add_ticket_competence'),
path('{}/dependence/add/'.format(ticket_id), is_manager(management.ticket_dependence_add_new), name='manager_add_ticket_dependence'),
path('{}/close/'.format(ticket_id), is_manager(management.ticket_close), name='manager_close_ticket'),
# Task
path('{}/add/'.format(task), is_manager(management.task_add_new), name='manager_add_ticket_task'),
path('{}/'.format(task_id), is_manager(management.task_detail), name='manager_task_detail'),
path('{}/close/'.format(task_id), is_manager(management.task_close), name='manager_close_task'),
path('{}/edit/'.format(task_id), is_manager(management.task_edit), name='manager_edit_task'),
# Offices
path('{}/new/'.format(office), manager.office_add_new, name='manager_office_add_new'),
path('{}/'.format(office_id), manager.office_detail, name='manager_office_detail'),
path('{}/edit/'.format(office_id), manager.office_edit, name='manager_office_edit'),
path('{}/remove-operator/<int:employee_id>/'.format(office_id), manager.office_remove_operator, name='manager_remove_office_operator'),
path('{}/add-category/'.format(office_id), manager.office_add_category, name='manager_add_office_category'),
path('{}/remove-category/<str:category_slug>/'.format(office_id), manager.office_remove_category, name='manager_remove_office_category'),
path('{}/disable/'.format(office_id), manager.office_disable, name='manager_disable_office'),
path('{}/enable/'.format(office_id), manager.office_enable, name='manager_enable_office'),
path('{}/delete/'.format(office_id), manager.office_delete, name='manager_delete_office'),
path('{}/'.format(offices), manager.offices, name='manager_offices'),
# Categories
path('{}/'.format(categories), manager.categories, name='manager_categories'),
path('{}/new/'.format(category), manager.category_add_new, name='manager_category_add_new'),
path('{}/'.format(category_id), manager.category_detail, name='manager_category_detail'),
path('{}/edit/'.format(category_id), manager.category_edit, name='manager_category_edit'),
path('{}/disable/'.format(category_id), manager.category_disable, name='manager_disable_category'),
path('{}/enable/'.format(category_id), manager.category_enable, name='manager_enable_category'),
path('{}/delete/'.format(category_id), manager.category_delete, name='manager_delete_category'),
path('{}/new/'.format(category_id).format(cat_input), manager.category_input_module_new, name='manager_category_new_input_module'),
# Category input modules
path('{}/'.format(cat_input_id), manager.category_input_module_details, name='manager_category_input_module'),
path('{}/edit/'.format(cat_input_id), manager.category_input_module_edit, name='manager_category_input_module_edit'),
path('{}/enable/'.format(cat_input_id), manager.category_input_module_enable, name='manager_category_input_module_enable'),
path('{}/disable/'.format(cat_input_id), manager.category_input_module_disable, name='manager_category_input_module_disable'),
path('{}/delete/'.format(cat_input_id), manager.category_input_module_delete, name='manager_category_input_module_delete'),
path('{}/preview/'.format(cat_input_id), manager.category_input_module_preview, name='manager_category_input_module_preview'),
path('{}/field/<int:field_id>/delete/'.format(cat_input_id), manager.category_input_field_delete, name='manager_category_input_field_delete'),
path('{}/field/<int:field_id>/edit/'.format(cat_input_id), manager.category_input_field_edit, name='manager_category_input_field_edit'),
# Category conditions
path('{}/new/'.format(condition), manager.category_condition_new, name='manager_category_condition_new'),
path('{}/edit/'.format(condition_id), manager.category_condition_edit, name='manager_category_condition_edit'),
path('{}/delete/'.format(condition_id), manager.category_condition_delete, name='manager_category_condition_delete'),
path('{}/disable/'.format(condition_id), manager.category_condition_disable, name='manager_category_condition_disable'),
path('{}/enable/'.format(condition_id), manager.category_condition_enable, name='manager_category_condition_enable'),
path('{}/'.format(condition_id), manager.category_condition_detail, name='manager_category_condition_detail'),
path('{}/remove-office/<str:office_slug>/'.format(category_id), manager.category_remove_office, name='manager_remove_category_office'),
path('{}/settings/'.format(base), is_manager(generic.user_settings), name='manager_user_settings'),
path('{}/messages/'.format(base), is_manager(generic.ticket_messages), name='manager_messages'),
]
# Operator URLs
base = '{}/<str:structure_slug>'.format(slugify(MANAGEMENT_URL_PREFIX['operator']))
tickets = '{}/tickets'.format(base)
ticket_id = '{}/ticket/<str:ticket_id>'.format(tickets)
task = '{}/activities'.format(ticket_id)
task_id = '{}/<str:task_id>'.format(task)
urlpatterns += [
path('{}/{}/'.format(base, _dashboard_name), operator.dashboard, name='operator_dashboard'),
# Ticket
path('{}/opened/'.format(tickets), is_operator(generic.opened_ticket), name='operator_opened_ticket'),
path('{}/unassigned/'.format(tickets), is_operator(generic.unassigned_ticket), name='operator_unassigned_ticket'),
path('{}/closed/'.format(tickets), is_operator(generic.closed_ticket), name='operator_closed_ticket'),
path('{}/'.format(tickets), is_operator(management.tickets), name='operator_tickets'),
path('{}/'.format(ticket_id), is_operator(management.ticket_detail), name='operator_manage_ticket'),
path('{}/messages/'.format(ticket_id), is_operator(management.ticket_message), name='operator_ticket_message'),
path('{}/competence/add/'.format(ticket_id), is_operator(management.ticket_competence_add_new), name='operator_add_ticket_competence'),
path('{}/competence/add/<str:str_slug>/'.format(ticket_id), is_operator(management.ticket_competence_add_final), name='operator_add_ticket_competence'),
path('{}/dependence/add/'.format(ticket_id), is_operator(management.ticket_dependence_add_new), name='operator_add_ticket_dependence'),
path('{}/close/'.format(ticket_id), is_operator(management.ticket_close), name='operator_close_ticket'),
# Task
path('{}/add/'.format(task), is_operator(management.task_add_new), name='operator_add_ticket_task'),
path('{}/'.format(task_id), is_operator(management.task_detail), name='operator_task_detail'),
path('{}/close/'.format(task_id), is_operator(management.task_close), name='operator_close_task'),
path('{}/edit/'.format(task_id), is_operator(management.task_edit), name='operator_edit_task'),
path('{}/settings/'.format(base), is_operator(generic.user_settings), name='operator_user_settings'),
path('{}/messages/'.format(base), is_operator(generic.ticket_messages), name='operator_messages'),
]
# User URLs
tickets = 'tickets'
ticket = '{}/ticket'.format(tickets)
ticket_id = '{}/<str:ticket_id>'.format(ticket)
urlpatterns += [
path('{}/'.format(_dashboard_name), user.dashboard, name='user_dashboard'),
path('{}/opened/'.format(tickets), generic.opened_ticket, name='user_opened_ticket'),
path('{}/unassigned/'.format(tickets), generic.unassigned_ticket, name='user_unassigned_ticket'),
path('{}/closed/'.format(tickets), generic.closed_ticket, name='user_closed_ticket'),
path('{}/'.format(ticket), user.ticket_url, name='user_ticket_url'),
path('{}/new/'.format(ticket), user.ticket_new_preload, name='new_ticket_preload'),
path('{}/new/<str:struttura_slug>/'.format(ticket), user.ticket_new_preload, name='new_ticket_preload'),
path('{}/new/<str:struttura_slug>/<str:categoria_slug>/'.format(ticket), user.ticket_add_new, name='add_new_ticket'),
path('{}/messages/'.format(ticket_id), user.ticket_message, name='ticket_message'),
path('{}/edit/'.format(ticket_id), user.ticket_edit, name='ticket_edit'),
path('{}/edit/remove-attachment/<str:attachment>/'.format(ticket_id), user.delete_my_attachment, name='delete_my_attachment'),
path('{}/delete/'.format(ticket_id), user.ticket_delete, name='elimina_ticket'),
path('{}/close/'.format(ticket_id), user.ticket_close, name='user_close_ticket'),
path('{}/activity/<str:task_id>/'.format(ticket_id), user.task_detail, name='task_detail'),
path('{}/'.format(ticket_id), is_the_owner(user.ticket_detail), name='ticket_detail'),
path('settings/', generic.user_settings, name='user_settings'),
path('messages/', generic.ticket_messages, name='messages'),
]
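With `app_name = "uni_ticket"`, these routes resolve by namespace in the usual way (slug and id values are placeholders; the resulting paths assume the app's URLconf is included at the site root):

```python
from django.urls import reverse

reverse('uni_ticket:manage', kwargs={'structure_slug': 'my-structure'})
# -> '/manage/my-structure'
reverse('uni_ticket:ticket_detail', kwargs={'ticket_id': 'abc123'})
# -> '/tickets/ticket/abc123/'
```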
### /asdl/typed_arith_parse.py (jyn514/oil)
*Apache-2.0 · 8,508 bytes · Python*

#!/usr/bin/env python
"""
typed_arith_parse.py: Parse shell-like and C-like arithmetic.
"""
from __future__ import print_function
import sys
from _devbuild.gen.typed_arith_asdl import (
arith_expr, arith_expr_e, arith_expr_t,
arith_expr__Binary, arith_expr__FuncCall, arith_expr__Const)
from typing import Dict, List, Optional, Union, cast
from asdl import tdop
from asdl.tdop import Parser
from asdl.tdop import ParserSpec
Token = tdop.Token
#
# Null Denotation -- token that takes nothing on the left
#
def NullConstant(p, # type: Parser
token, # type: Token
bp, # type: int
):
# type: (...) -> arith_expr_t
if token.type == 'number':
return arith_expr.Const(int(token.val))
# We have to wrap a string in some kind of variant.
if token.type == 'name':
return arith_expr.Var(token.val)
raise AssertionError(token.type)
def NullParen(p, # type: Parser
token, # type: Token
bp, # type: int
):
# type: (...) -> arith_expr_t
""" Arithmetic grouping """
r = p.ParseUntil(bp)
p.Eat(')')
return r
def NullPrefixOp(p, token, bp):
# type: (Parser, Token, int) -> arith_expr_t
"""Prefix operator.
Low precedence: return, raise, etc.
return x+y is return (x+y), not (return x) + y
High precedence: logical negation, bitwise complement, etc.
!x && y is (!x) && y, not !(x && y)
"""
r = p.ParseUntil(bp)
return arith_expr.Unary(token.val, r)
def NullIncDec(p, token, bp):
# type: (Parser, Token, int) -> arith_expr_t
""" ++x or ++x[1] """
right = p.ParseUntil(bp)
if not isinstance(right, (arith_expr.Var, arith_expr.Index)):
raise tdop.ParseError("Can't assign to %r" % right)
return arith_expr.Unary(token.val, right)
#
# Left Denotation -- token that takes an expression on the left
#
def LeftIncDec(p, # type: Parser
token, # type: Token
left, # type: arith_expr_t
rbp, # type: int
):
# type: (...) -> arith_expr_t
""" For i++ and i--
"""
if not isinstance(left, (arith_expr.Var, arith_expr.Index)):
raise tdop.ParseError("Can't assign to %r" % left)
token.type = 'post' + token.type
return arith_expr.Unary(token.val, left)
def LeftIndex(p, token, left, unused_bp):
# type: (Parser, Token, arith_expr_t, int) -> arith_expr_t
""" index f[x+1] """
# f[x] or f[x][y]
if not isinstance(left, arith_expr.Var):
raise tdop.ParseError("%s can't be indexed" % left)
index = p.ParseUntil(0)
if p.AtToken(':'):
p.Next()
end = p.ParseUntil(0) # type: Union[arith_expr_t, None]
else:
end = None
p.Eat(']')
# TODO: If you see ], then
# 1:4
# 1:4:2
# Both end and step are optional
if end:
return arith_expr.Slice(left, index, end, None)
else:
return arith_expr.Index(left, index)
def LeftTernary(p, # type: Parser
token, # type: Token
left, # type: arith_expr_t
bp, # type: int
):
# type: (...) -> arith_expr_t
""" e.g. a > 1 ? x : y """
true_expr = p.ParseUntil(bp)
p.Eat(':')
false_expr = p.ParseUntil(bp)
return arith_expr.Ternary(left, true_expr, false_expr)
def LeftBinaryOp(p, # type: Parser
token, # type: Token
left, # type: arith_expr_t
rbp, # type: int
):
# type: (...) -> arith_expr__Binary
""" Normal binary operator like 1+2 or 2*3, etc. """
return arith_expr.Binary(token.val, left, p.ParseUntil(rbp))
def LeftAssign(p, # type: Parser
token, # type: Token
left, # type: arith_expr_t
rbp, # type: int
):
# type: (...) -> arith_expr__Binary
""" Normal binary operator like 1+2 or 2*3, etc. """
# x += 1, or a[i] += 1
if not isinstance(left, (arith_expr.Var, arith_expr.Index)):
raise tdop.ParseError("Can't assign to %r" % left)
node = arith_expr.Binary(token.val, left, p.ParseUntil(rbp))
# For TESTING
node.spids.append(42)
node.spids.append(43)
return node
# For overloading of , inside function calls
COMMA_PREC = 1
def LeftFuncCall(p, token, left, unused_bp):
# type: (Parser, Token, arith_expr_t, int) -> arith_expr__FuncCall
""" Function call f(a, b). """
args = []
# f(x) or f[i](x)
if not isinstance(left, arith_expr.Var):
raise tdop.ParseError("%s can't be called" % left)
func_name = left.name # get a string
while not p.AtToken(')'):
# We don't want to grab the comma -- it is NOT a sequence operator here --
# so parse each argument at COMMA_PREC.
args.append(p.ParseUntil(COMMA_PREC))
if p.AtToken(','):
p.Next()
p.Eat(")")
return arith_expr.FuncCall(func_name, args)
def MakeShellParserSpec():
# type: () -> ParserSpec
"""
Create a parser.
Compare the code below with this table of C operator precedence:
http://en.cppreference.com/w/c/language/operator_precedence
"""
spec = tdop.ParserSpec()
spec.Left(31, LeftIncDec, ['++', '--'])
spec.Left(31, LeftFuncCall, ['('])
spec.Left(31, LeftIndex, ['['])
# 29 -- binds to everything except function call, indexing, postfix ops
spec.Null(29, NullIncDec, ['++', '--'])
spec.Null(29, NullPrefixOp, ['+', '!', '~', '-'])
# Right associative: 2 ** 3 ** 2 == 2 ** (3 ** 2)
spec.LeftRightAssoc(27, LeftBinaryOp, ['**'])
spec.Left(25, LeftBinaryOp, ['*', '/', '%'])
spec.Left(23, LeftBinaryOp, ['+', '-'])
spec.Left(21, LeftBinaryOp, ['<<', '>>'])
spec.Left(19, LeftBinaryOp, ['<', '>', '<=', '>='])
spec.Left(17, LeftBinaryOp, ['!=', '=='])
spec.Left(15, LeftBinaryOp, ['&'])
spec.Left(13, LeftBinaryOp, ['^'])
spec.Left(11, LeftBinaryOp, ['|'])
spec.Left(9, LeftBinaryOp, ['&&'])
spec.Left(7, LeftBinaryOp, ['||'])
spec.LeftRightAssoc(5, LeftTernary, ['?'])
# Right associative: a = b = 2 is a = (b = 2)
spec.LeftRightAssoc(3, LeftAssign, [
'=',
'+=', '-=', '*=', '/=', '%=',
'<<=', '>>=', '&=', '^=', '|='])
spec.Left(COMMA_PREC, LeftBinaryOp, [','])
# 0 precedence -- doesn't bind until )
spec.Null(0, NullParen, ['(']) # for grouping
# -1 precedence -- never used
spec.Null(-1, NullConstant, ['name', 'number'])
spec.Null(-1, tdop.NullError, [')', ']', ':', 'eof'])
return spec
def MakeParser(s):
# type: (str) -> Parser
"""Used by tests."""
spec = MakeShellParserSpec()
lexer = tdop.Tokenize(s)
p = tdop.Parser(spec, lexer)
return p
def ParseShell(s, expected=None):
# type: (str, Optional[str]) -> arith_expr_t
"""Used by tests."""
p = MakeParser(s)
tree = p.Parse()
sexpr = repr(tree)
if expected is not None:
assert sexpr == expected, '%r != %r' % (sexpr, expected)
#print('%-40s %s' % (s, sexpr))
return tree
class Evaluator(object):
def __init__(self):
# type: () -> None
self.mem = {} # type: Dict[str, int]
def Eval(self, node):
# type: (arith_expr_t) -> int
"""Use the isinstance() style for comparison."""
if isinstance(node, arith_expr__Const):
assert node.i is not None
return node.i
if isinstance(node, arith_expr__Binary):
assert node.left is not None
assert node.right is not None
left = self.Eval(node.left)
right = self.Eval(node.right)
op = node.op
if op == '+':
return left + right
return 3
def Eval2(self, node):
# type: (arith_expr_t) -> int
tag = node.tag
if tag == arith_expr_e.Const:
n = cast(arith_expr__Const, node)
assert n.i is not None
return n.i
if tag == arith_expr_e.Binary:
n2 = cast(arith_expr__Binary, node)
assert n2.left is not None
assert n2.right is not None
left = self.Eval(n2.left)
right = self.Eval(n2.right)
op = n2.op
if op == '+':
return left + right
return 3
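# Usage sketch (an illustrative note, not part of the original module; the
# parse-tree repr comes from the generated ASDL classes, so no exact string
# is asserted here):
#
#   tree = ParseShell('1 + 2')
#   print(Evaluator().Eval(tree))  # -> 3; only '+' is evaluated, any other
#                                  #    binary operator falls through to the
#                                  #    stub value 3 above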
def main(argv):
# type: (List[str]) -> int
try:
action = argv[1]
s = argv[2]
except IndexError:
print('Usage: ./arith_parse.py ACTION EXPRESSION')
return 2
try:
node = ParseShell(s)
except tdop.ParseError as e:
print('Error parsing %r: %s' % (s, e), file=sys.stderr)
if action == 'parse':
print(node)
elif action == 'eval':
ev = Evaluator()
result = ev.Eval(node)
print(node)
print(' => ')
print(result)
else:
print('Invalid action %r' % action)
return 2
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"[email protected]"
] | |
7ceceed258eb306cbc6fee57056ca756971ba8da | df1cb33bfe99a1e72cf75931749163b7c8731757 | /stages/stage3.py | 012d626c02d661dbc7a2f17848fc0e501c06bcb9 | [] | no_license | orf/wikilink_py | 2d6ae9dd64264fdf17995980ed8a4a960c199c5b | 6643397e220970a93dab1e50e120748bfdc3bf19 | refs/heads/master | 2021-01-22T11:55:16.906965 | 2014-01-08T20:49:38 | 2014-01-08T20:49:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,415 | py | from lib.progress import run_with_progressbar
from lib.formatters.Neo4jFormatter import Neo4jFormatter
from lib.formatters.CSVFormatter import MultiCSVFormatter
import functools
import os
import logging
import sys
import itertools
import __pypy__
import json
logger = logging.getLogger()
logger.addHandler(logging.StreamHandler(sys.stdout))
logger.setLevel(logging.INFO)
STAGE3_TITLES_TO_ID = {}
STAGE3_ID_TO_DATA = {}
FLAG_REDIRECT = 1
FLAG_SEEN = 2
def handle_stage1_line(line):
# There is one page in stage1.csv whose title is a unicode NEXT_LINE character (\x85).
# As such we have to encode each line individually.
# https://en.wikipedia.org/w/api.php?action=query&prop=info&pageids=28644448&inprop=url
page_id, page_title, is_redirect = unicode(line.strip("\n"), "utf-8").split("|")
flags = FLAG_REDIRECT if is_redirect == "1" else 0
STAGE3_TITLES_TO_ID[page_title] = int(page_id)
STAGE3_ID_TO_DATA[int(page_id)] = (page_title, flags)
#yield (page_title, flags), int(page_id)
def get_ids_from_titles(titles_list, get_none=False):
"""
I take a list of titles and return a list of integer IDs. If get_none is True,
the returned list keeps a 0 placeholder wherever a title cannot be found.
"""
returner = []
for title in titles_list:
x = STAGE3_TITLES_TO_ID.get(title, 0)
if x is not 0 or get_none is True:
returner.append(x) # Keeping all elements uniform might increase performance
return returner
def get_page_data_from_id(page_id, update_seen=True):
"""
I take a page ID and I return a tuple containing the title, is_redirect flag and a value indicating if this
page ID has been queried before.
"""
p_data = STAGE3_ID_TO_DATA.get(page_id, None)
if p_data is None:
return None
if update_seen:
STAGE3_ID_TO_DATA[page_id] = (p_data[0], p_data[1] | FLAG_SEEN)
return p_data
def set_page_redirect(title, to):
"""
I replace a page title with the ID of the page it links to
"""
STAGE3_TITLES_TO_ID[title] = to
def delete_page(title, page_id):
"""
I delete a page from our registry, given its title and/or its ID.
"""
if title:
del STAGE3_TITLES_TO_ID[title]
if page_id:
del STAGE3_ID_TO_DATA[page_id]
def split_page_info(line, update_seen=True, get_none=False, get_links=True):
"""
I take a line output by Stage2 and return (the_id, page_links, page_info).
"""
line = line.rstrip("\n")
split_line = line.split("|")
page_id = int(split_line[0])
page_info = get_page_data_from_id(page_id, update_seen=update_seen)
if page_info is None:
return None, None, None
# Using islice like this keeps memory down by avoiding the creation of another
# list; it also avoids a len() call, so it may be a little faster as well.
page_links = itertools.islice(split_line, 1, sys.maxint)
return page_id, get_ids_from_titles(page_links, get_none) if get_links else page_links, page_info
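# Example (sketch; the titles below are hypothetical): for a stage2 line such
# as "12|Foo|Bar", split_page_info returns the int id 12, the ids resolved
# from ["Foo", "Bar"] (or the raw title iterator when get_links is False),
# and the (title, flags) tuple recorded for page 12 -- or (None, None, None)
# if the id is unknown.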
def stage3_pre(line):
"""
We need to sort out redirects so they point to the correct pages. We do this by
loading stage2.csv, which contains ID|link_title|link_title..., and resolving the IDs of the links.
"""
page_id, page_links, page_info = split_page_info(unicode(line, "utf-8"), update_seen=False, get_links=False)
if page_info and page_info[1] & FLAG_REDIRECT: # Are we a redirect?
page_links = get_ids_from_titles(page_links, True)
page_title = page_info[0]
if len(page_links) > 1 and page_links[0]:
# Point the redirect page to the ID of the page it redirects to
set_page_redirect(page_title, page_links[0])
delete_page(None, page_id)
else:
# The page we are redirecting to cannot be found, remove the redirect page.
delete_page(page_title, page_id)
def stage3(line, output_format="neo"):
"""
I combine the results from the previous stages into a single cohesive file
"""
global STAGE3_ROW_COUNTER
page_id, page_links, page_info = split_page_info(unicode(line.strip("\n"), "utf-8"), get_links=False)
if page_info is None: # Ignore redirects for now
return None
page_title, flags = page_info
#print "flags: %s" % flags
if not flags & FLAG_REDIRECT:
page_links = get_ids_from_titles(page_links, False)
if flags & FLAG_SEEN:
# Already visited this page before, output to an SQL file instead
if output_format == "neo":
return None, "\n".join(["%s\t%s" % (page_id, link_id) for link_id in set(page_links)])
else:
with open('stage3.sql', 'a') as fd:
fd.write("UPDATE pages SET links = uniq(array_cat(links, ARRAY[%s]::integer[])) WHERE id = %s;\n" %
(",".join(map(str, set(page_links))), page_id))
else:
# CSV output
# id, title, is_redirect, links_array
if output_format == "neo":
#return u"({id:%s, name:%s})" % (page_id, json.dumps(page_title).encode("unicode-escape"))
return ("%s\t%s\n" % (page_id, page_title)).encode("utf-8"),\
"%s\n" % "\n".join(["%s\t%s" % (page_id, link_id) for link_id in set(page_links)])
#return ((page_id, page_title),),
else:
return "%s|%s|%s|{%s}\n" % (page_id, page_title, is_redirect,
",".join(map(str, set(page_links))))
if __name__ == "__main__":
logger.info("Loading stage1.csv into memory")
with open("stage1.csv", 'rb', buffering=1024*1024) as csv_fd:
run_with_progressbar(csv_fd, None, handle_stage1_line, os.path.getsize("stage1.csv"))
logger.info("Loaded %s/%s page infos. Strategies: %s and %s" % (len(STAGE3_TITLES_TO_ID), len(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_TITLES_TO_ID)))
with open("stage2.csv", "rb", buffering=1024*1024) as input_fd:
run_with_progressbar(input_fd, None, stage3_pre, os.path.getsize("stage2.csv"))
logger.info("Have %s/%s page infos. Strategies: %s and %s" % (len(STAGE3_TITLES_TO_ID), len(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_ID_TO_DATA),
__pypy__.dictstrategy(STAGE3_TITLES_TO_ID)))
logger.info("Starting dump")
with open('stage2.csv', "rb", buffering=1024*1024*8) as input_fd: # , encoding="utf-8", buffering=1024*8
with open('stage3.nodes', mode="wb", buffering=1024*1024*8) as nodes_fd:
with open('stage3.links', mode="wb", buffering=1024*1024*20) as links_fd:
formatter = MultiCSVFormatter(((nodes_fd, ("id:int:node_id", "title:string")),
(links_fd, ("id:int:node_id", "id:int:node_id"))))
run_with_progressbar(input_fd, None,
functools.partial(stage3, output_format="neo"),
os.path.getsize("stage2.csv"),
formatter=formatter) | [
"[email protected]"
] | |
92df4a82b4256ff8f683501f22e0c09dbea8b0c0 | b89df6019163d7b18a8ecb4003939f6235b5de85 | /mnist/cnn_mnist.py | 0f8dd40e176c805f08e1a65e10cdad7e16b51923 | [] | no_license | liketheflower/tf_practise | fdd22b608ca7d513a4972497466e3fc7a12762b6 | 2725b52169b2f0044d20b3c33c86485336e65483 | refs/heads/master | 2020-03-19T23:21:16.467649 | 2018-06-19T03:56:07 | 2018-06-19T03:56:07 | 137,003,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,709 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
def cnn_model_fn(features, labels, mode):
"""Model function for CNN."""
# Input Layer
# Reshape X to 4-D tensor: [batch_size, width, height, channels]
# MNIST images are 28x28 pixels, and have one color channel
input_layer = tf.reshape(features["x"], [-1, 28, 28, 1])
# Convolutional Layer #1
# Computes 32 features using a 5x5 filter with ReLU activation.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 28, 28, 1]
# Output Tensor Shape: [batch_size, 28, 28, 32]
conv1 = tf.layers.conv2d(
inputs=input_layer,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #1
# First max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 28, 28, 32]
# Output Tensor Shape: [batch_size, 14, 14, 32]
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# Convolutional Layer #2
# Computes 64 features using a 5x5 filter.
# Padding is added to preserve width and height.
# Input Tensor Shape: [batch_size, 14, 14, 32]
# Output Tensor Shape: [batch_size, 14, 14, 64]
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu)
# Pooling Layer #2
# Second max pooling layer with a 2x2 filter and stride of 2
# Input Tensor Shape: [batch_size, 14, 14, 64]
# Output Tensor Shape: [batch_size, 7, 7, 64]
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# Flatten tensor into a batch of vectors
# Input Tensor Shape: [batch_size, 7, 7, 64]
# Output Tensor Shape: [batch_size, 7 * 7 * 64]
pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
# Dense Layer
# Densely connected layer with 1024 neurons
# Input Tensor Shape: [batch_size, 7 * 7 * 64]
# Output Tensor Shape: [batch_size, 1024]
dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)
# Add dropout operation; 0.6 probability that element will be kept
dropout = tf.layers.dropout(
inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)
# Logits layer
# Input Tensor Shape: [batch_size, 1024]
# Output Tensor Shape: [batch_size, 10]
logits = tf.layers.dense(inputs=dropout, units=10)
predictions = {
# Generate predictions (for PREDICT and EVAL mode)
"classes": tf.argmax(input=logits, axis=1),
# Add `softmax_tensor` to the graph. It is used for PREDICT and by the
# `logging_hook`.
"probabilities": tf.nn.softmax(logits, name="softmax_tensor")
}
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Calculate Loss (for both TRAIN and EVAL modes)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(
loss=loss,
global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)
# Add evaluation metrics (for EVAL mode)
eval_metric_ops = {
"accuracy": tf.metrics.accuracy(
labels=labels, predictions=predictions["classes"])}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
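# Shape walk-through of the stack above (an added sanity note, derived from
# the layer comments; 7 * 7 * 64 = 3136):
#   [batch, 28, 28, 1] -> conv 5x5/32 -> [batch, 28, 28, 32]
#   -> pool 2x2 -> [batch, 14, 14, 32] -> conv 5x5/64 -> [batch, 14, 14, 64]
#   -> pool 2x2 -> [batch, 7, 7, 64] -> flatten -> [batch, 3136]
#   -> dense -> [batch, 1024] -> dense -> [batch, 10]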
def main(unused_argv):
# Load training and eval data
mnist = tf.contrib.learn.datasets.load_dataset("mnist")
train_data = mnist.train.images # Returns np.array
train_labels = np.asarray(mnist.train.labels, dtype=np.int32)
eval_data = mnist.test.images # Returns np.array
eval_labels = np.asarray(mnist.test.labels, dtype=np.int32)
# Create the Estimator
mnist_classifier = tf.estimator.Estimator(
model_fn=cnn_model_fn, model_dir="/tmp/mnist_convnet_model")
# Set up logging for predictions
# Log the values in the "Softmax" tensor with label "probabilities"
tensors_to_log = {"probabilities": "softmax_tensor"}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=50)
# Train the model
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": train_data},
y=train_labels,
batch_size=100,
num_epochs=None,
shuffle=True)
mnist_classifier.train(
input_fn=train_input_fn,
steps=20000,
hooks=[logging_hook])
# Evaluate the model and print results
eval_input_fn = tf.estimator.inputs.numpy_input_fn(
x={"x": eval_data},
y=eval_labels,
num_epochs=1,
shuffle=False)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print(eval_results)
if __name__ == "__main__":
tf.app.run()
| [
"[email protected]"
] | |
d89b26a0c2aa42dccc501acbb07ac7e597b9047a | 102b67d83e12219f3bf4bea6ed691ddd9c2e69f1 | /ad/templatetags/ads.py | 7e6251780e534773006f27332ae6205e14bdccc8 | [
"BSD-3-Clause"
] | permissive | nicksergeant/snipt-old | 2cb6bec629d798dd83fc39f0105828f1fd40a51a | f2f1e9f183fb69bcc0fabbc25059bfd1c60527e2 | refs/heads/master | 2021-01-18T14:03:01.426851 | 2012-09-19T00:09:48 | 2012-09-19T00:09:48 | 865,573 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 882 | py | from tagging.models import TaggedItem
from snipt.ad.models import Ad
from django import template
register = template.Library()
@register.simple_tag
def ad(tag):
try:
ads = TaggedItem.objects.get_by_model(Ad.objects.order_by('?'), tag)
ad = ads[0]
except:
ads = Ad.objects.order_by('?')
ad = ads[0]
tag = ''
return """
<h1 style="margin-bottom: 20px; padding-top: 15px;">A good %s read</h1>
<div class="amazon-book clearfix">
<div class="amazon-title">
<a href="%s" rel="nofollow" class="clearfix">
<img src="/media/%s" alt="%s" title="%s" />
%s
</a>
</div>
</div>
""" % (tag,
ad.url,
ad.image,
ad.title,
ad.title,
ad.title)
| [
"[email protected]"
] | |
974c0c7fd25b0de5202f8adde919a1f585b0a4ed | aa45f6f5106517c582b21691ce22ad808339ec64 | /borax/calendars/birthday.py | aea5997b9e454ee9eaf8a2861a068b38780a781c | [
"MIT"
] | permissive | kinegratii/borax | 86b1a87c686f9b74db8d919afe30761497888368 | 06407958a6ba3115d783ed6457c2e7355a3f237c | refs/heads/master | 2023-03-11T06:09:20.040607 | 2022-11-15T02:39:43 | 2022-11-15T02:39:43 | 126,959,349 | 67 | 8 | MIT | 2022-11-15T02:39:44 | 2018-03-27T09:07:08 | Python | UTF-8 | Python | false | false | 1,151 | py | from datetime import date
from .lunardate import LunarDate, LCalendars
def nominal_age(birthday, today=None):
birthday = LCalendars.cast_date(birthday, LunarDate)
if today:
today = LCalendars.cast_date(today, LunarDate)
else:
today = LunarDate.today()
return today.year - birthday.year + 1
def actual_age_solar(birthday, today=None):
"""See more at https://stackoverflow.com/questions/2217488/age-from-birthdate-in-python/9754466#9754466
:param birthday:
:param today:
:return:
"""
birthday = LCalendars.cast_date(birthday, date)
if today:
today = LCalendars.cast_date(today, date)
else:
today = date.today()
return today.year - birthday.year - ((today.month, today.day) < (birthday.month, birthday.day))
def actual_age_lunar(birthday, today=None):
birthday = LCalendars.cast_date(birthday, LunarDate)
if today:
today = LCalendars.cast_date(today, LunarDate)
else:
today = LunarDate.today()
return today.year - birthday.year - (
(today.month, today.leap, today.day) < (birthday.month, birthday.leap, birthday.day)
)
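# Example usage (a minimal sketch; the Gregorian dates are illustrative and
# the lunar variants depend on the LCalendars table data):
#
#   from datetime import date
#   actual_age_solar(date(1990, 6, 1), today=date(2020, 5, 31))  # -> 29
#   actual_age_solar(date(1990, 6, 1), today=date(2020, 6, 1))   # -> 30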
| [
"[email protected]"
] | |
689a7bcf9a17e9920971e0f75dbeae77f831658a | 65b9a63e8c132f32aeb56961968f5e363bd9a087 | /20190708_python识别中文车牌windows/同样的参数训练结果不同/09_last0.6937/keras_train_test.py | 8cb1811604a268356d28d0685ff1158985f6c64e | [] | no_license | 346644054/examples2019 | e70f13cfb56c3478fc6e335c730e0e70e70a6226 | 5f9777e7a887e635971156354f56ce065fa3f41e | refs/heads/master | 2022-04-09T03:52:52.973414 | 2020-02-28T03:05:02 | 2020-02-28T03:05:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,425 | py | # -*- coding: utf-8 -*-
"""
Vehicle plate recognition
using keras
Author: elesun
https://cloud.tencent.com/developer/article/1005199
"""
from __future__ import print_function
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from keras.models import Sequential,Input,Model
from keras.layers import Conv2D,MaxPooling2D,Dense,Dropout,Activation,Flatten
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.models import load_model
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
import cv2
#os.environ["CUDA_VISIBLE_DEVICES"] = "0" #"1,0"
##################### license plate data generator ################################################
# Used as the data input for the deep neural network.
# An open-source plate generator; the randomly generated plates look convincingly real.
# Chinese vehicle plates have 7 characters: the first is a province character, the second
# is an uppercase letter A-Z, and positions 3-7 are a mix of digits and letters.
from genplate import *
chars = ["京", "沪", "津", "渝", "冀", "晋", "蒙", "辽", "吉", "黑", "苏", "浙", "皖", "闽", "赣", "鲁", "豫", "鄂", "湘", "粤", "桂",
"琼", "川", "贵", "云", "藏", "陕", "甘", "青", "宁", "新", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A",
"B", "C", "D", "E", "F", "G", "H", "J", "K", "L", "M", "N", "P", "Q", "R", "S", "T", "U", "V", "W", "X",
"Y", "Z"
]
M_strIdx = dict(zip(chars, range(len(chars))))
#print("M_strIdx\n",M_strIdx)
Ge = GenPlate("./font/platech.ttf",'./font/platechar.ttf',"./NoPlates")
model_dir = "./model"
if not os.path.isdir(model_dir):
os.makedirs(model_dir)
def gen(batch_size=32):
while True:
l_plateStr, l_plateImg = Ge.genBatch(batch_size, 2, range(31, 65), "plate", (272, 72))
#print('l_plateStr type :', type(l_plateStr))
#print('l_plateStr = ', l_plateStr)
#print('l_plateImg type = ', type(l_plateImg))
#print('l_plateImg len :', len(l_plateImg))
X = np.array(l_plateImg, dtype=np.uint8)
#print 'X type :',type(X)
#print 'X.dtype :',X.dtype
#print 'X.shape :',X.shape
#print np.array(list(map(lambda a: [a for a in list(x)], l_plateStr)))#,dtype=np.float32)
#ytmp = np.array(list(map(lambda a: [a for a in list(x)], l_plateStr)))#, dtype=np.uint8)# x: [M_strIdx[a]
temp = list(map(lambda x: [a for a in list(x)], l_plateStr))#elesun TypeError: object of type 'map' has no len()
#print("temp\n",temp)
#print('temp type :', type(temp)) # <type 'list'>
#print("temp[0]\n",temp[0])
#print('temp[0] type :', type(temp[0])) # <type 'list'>
#print("temp[0][0]\n",temp[0][0])
#print('temp[0][0] type :', type(temp[0][0])) # <type 'str'>
#print("temp[0][0] + temp[0][1] + temp[0][2] :", (temp[0][0] + temp[0][1] + temp[0][2]))
temp2 = [] # outer list
for i in range(len(temp)):
temp1 = [] # inner list
for j in range(len(temp[i])):
if j == 0 :
temp1.append(temp[i][0] + temp[i][1] + temp[i][2]) # join the bytes that form one Chinese character, e.g. 闽
elif 1 <= j <= 2 :
continue # only the first three bytes belong to the Chinese character
else :
temp1.append(temp[i][j]) # the remaining plate digits/letters are appended as-is
temp2.append(temp1)
#print("temp2\n",temp2)
# print to verify that the dictionary lookups are correct
#for i in range(len(temp2)):
# for j in range(len(temp2[i])):
# print("temp2[%d][%d]=" % (i, j),temp2[i][j],"; M_strIdx[(temp2[%d][%d])]="%(i,j),M_strIdx[(temp2[i][j])])
#print('temp2 type :', type(temp2)) # <type 'numpy.ndarray'>
#print("M_strIdx['A']",M_strIdx['A'])
#print("M_strIdx['\xe6\xb9\x98']", M_strIdx['\xe6\xb9\x98'])
#print("M_strIdx['\xe5']", M_strIdx['\xe5']) # error
#ytmp = np.array(list(map(lambda x: [M_strIdx[a] for a in list(x)], l_plateStr)), dtype=np.uint8)
ytmp = np.array(list(map(lambda x: [M_strIdx[a] for a in x], temp)), dtype=np.uint8)#elesun temp2 for python2 ubuntu
#print('ytmp\n', ytmp)
#print ('ytmp type :',type(ytmp)) # <type 'numpy.ndarray'>
#print ('ytmp.dtype :',ytmp.dtype) # uint8
#print ('ytmp.shape :',ytmp.shape) # (32, 7)
y = np.zeros([ytmp.shape[1],batch_size,len(chars)])# 7,32,65
#print 'y type :',type(y)
#print 'y.dtype :',y.dtype
#print 'y.shape :',y.shape
for batch in range(batch_size):
for idx,row_i in enumerate(ytmp[batch]):
y[idx,batch,row_i] = 1
yield X, [yy for yy in y]
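# Shapes yielded by gen(), for reference: X is a uint8 array of shape
# (batch_size, 72, 272, 3); the label is a list of 7 arrays, each of shape
# (batch_size, 65) -- one one-hot head per plate character, matching the 7
# softmax outputs built in the model below (len(chars) == 65).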
######################### build and train the network ###########################################
def model_build_train(lr=0.001, epochs=25, batch_size=32, model_name="model_best.h5"):
print("building network ...")
# Use a stack of convolution layers followed by 7 parallel fully connected heads, one per character of the input plate image.
input_tensor = Input((72, 272, 3))
x = input_tensor
for i in range(3):
x = Conv2D(32*2**i, (3, 3), activation='relu')(x)
x = Conv2D(32*2**i, (3, 3), activation='relu')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dropout(0.25)(x)
n_class = len(chars) #elesun len(chars)
x = [Dense(n_class, activation='softmax', name='c%d'%(i+1))(x) for i in range(7)]
model = Model(inputs=input_tensor, outputs=x)
model.summary()
print("save network picture")
#SVG(model_to_dot(model=model, show_layer_names=True, show_shapes=True).create(prog='dot', format='svg'))
#SVG(model_to_dot(model).create(prog='dot', format='svg'))
print("training network ...")
adam = Adam(lr=lr)
model.compile(loss='categorical_crossentropy',
optimizer=adam,
metrics=['accuracy'])
best_model = ModelCheckpoint(os.path.join(model_dir, model_name), monitor='val_loss', verbose=0, save_best_only=True)
#print("gen(batch_size)",list(gen(batch_size)))
#fit_generator(generator, steps_per_epoch=None, epochs=1, verbose=1, callbacks=None, validation_data=None, validation_steps=None, class_weight=None, max_queue_size=10, workers=1, use_multiprocessing=False, shuffle=True, initial_epoch=0)
model.fit_generator(gen(batch_size), steps_per_epoch=200, epochs=epochs,
validation_data=gen(batch_size), validation_steps=20,
verbose=2, callbacks=[best_model]) # one log line per epoch
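# Quick smoke test (a sketch; the epoch/batch values are illustrative only):
#   model_build_train(lr=0.001, epochs=1, batch_size=8, model_name="smoke.h5")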
######################### load the test plate images ###########################################
def load_plate_data(data_dir="./recognize_samples"):
print("loading plate data ...")
plateStr = []
plateImg = []
file_list = os.listdir(data_dir)
#print(file_list)
for filename in file_list:
path = ''
path = os.path.join(data_dir, filename)
image = cv2.imread(path) # read the image (cv2.IMREAD_COLOR / cv2.IMREAD_GRAYSCALE)
#print("image.shape:",image.shape) #(72, 272, 3)
if image.shape != (72, 272, 3) :
# image = cv2.resize(image, (width, height), interpolation=cv2.INTER_LANCZOS4)
print("picture %s size error, maybe resize before load !"%(filename))
continue
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#print ("%s has been read!"%filename)
plateStr.append(filename[:-4])
plateImg.append(image)
return plateStr, plateImg
########################## show the model predictions ########################################
def model_load_predict_plt(model_name,test_Img):
# load the trained model
print('load the trained model')
model = load_model(os.path.join(model_dir, model_name))
print("###############model predict###############")
results = model.predict(np.array(test_Img))
print('results type :', type(results)) #<type 'list'>
results = np.array(results)
print ('results type :',type(results)) #<type 'numpy.ndarray'>
print ('results.dtype :',results.dtype) #float32
print ('results.shape :',results.shape) #(7, num, 65)
results = np.argmax(results, axis = 2)
results = results.T
print ('results.dtype :',results.dtype) #int64
print ('results.shape :',results.shape) #(num, 7)
print('results\n', results) #
#print("M_strIdx[0]",M_strIdx[0])
#results = "".join([M_strIdx[xx] for xx in results.T])
predict_plate_str = [] # outer list
for i in range(results.shape[0]):
temp = [] # inner list
for j in range(results.shape[1]):
for key, value in M_strIdx.items():
if value == results[i,j]:
print("key",key)
temp.append(key)
predict_plate_str.append(temp)
print('predict_plate_str type :', type(predict_plate_str)) #
print('predict_plate_str\n', predict_plate_str)
# predict_plate_str = np.array(predict_plate_str)
# print('predict_plate_str type :', type(predict_plate_str))
# print ('predict_plate_str.dtype :',predict_plate_str.dtype) #
# print ('predict_plate_str.shape :',results.shape) #
# print('predict_plate_str\n', predict_plate_str) #
print("###############plt results###############")
myfont = FontProperties(fname='./font/Lantinghei.ttc')
# Render Chinese labels correctly; SimHei is a font name and must exist on the system.
plt.rcParams['font.sans-serif'] = ['SimHei']
# Render minus signs correctly.
plt.rcParams['axes.unicode_minus'] = False
fig = plt.figure(figsize=(12,12))
#l_titles = list(map(lambda x: "".join([M_idxStr[xx] for xx in x]), np.argmax(np.array(model.predict( np.array(l_plateImg) )), 2).T))
for idx,img in enumerate(test_Img[0:12]):
ax = fig.add_subplot(4,3,idx+1)
ax.imshow(img)
ax.set_title(predict_plate_str[idx],fontproperties=myfont)
ax.set_axis_off()
plt.show()
if __name__ == "__main__":
model_name = "model_best.h5"
model_build_train(lr=0.0001, epochs=30, batch_size=16, model_name="model_best.h5")
test_data_dir = "./recognize_samples"
test_name, test_Img = load_plate_data(test_data_dir)
print("test_name",test_name)
model_load_predict_plt(model_name, test_Img)
| [
"[email protected]"
] | |
a0f1f2557839af7ed23dfb81c8ff5bea64a59bc4 | e4c25590298b084e3fb44b0b325a05699fac4202 | /Kattis/sevenwonders.py | 5a96568a7cc25485bbe157259a725421d500474b | [] | no_license | shakib609/competitive-programming | 520028bd1147e7e43e708875b6390e1a7d65a94b | 5090d5d3650b8055e16651ed9de5380cc7fdb7aa | refs/heads/master | 2022-12-09T12:33:20.167332 | 2022-12-07T17:28:30 | 2022-12-07T17:28:30 | 67,289,210 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | s = input().strip()
t, c, g = [0, 0, 0]
for ch in s:
if ch == 'T':
t += 1
elif ch == 'C':
c += 1
else:
g += 1
result = t ** 2 + c ** 2 + g ** 2
result += min([t, c, g]) * 7
print(result)
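# Worked example (hand-checked): for the input "TCGTTC", t=3, c=2, g=1, so the
# score is 3**2 + 2**2 + 1**2 + 7*min(3, 2, 1) = 9 + 4 + 1 + 7 = 21.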
| [
"[email protected]"
] | |
52afe556959590049b64feb71a30c5fce7fedaf1 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/containerregistry/v20190501/get_webhook.py | 7948e368ab3b2de549dbfecb516f227ee8cca61a | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,711 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetWebhookResult',
'AwaitableGetWebhookResult',
'get_webhook',
'get_webhook_output',
]
@pulumi.output_type
class GetWebhookResult:
"""
An object that represents a webhook for a container registry.
"""
def __init__(__self__, actions=None, id=None, location=None, name=None, provisioning_state=None, scope=None, status=None, tags=None, type=None):
if actions and not isinstance(actions, list):
raise TypeError("Expected argument 'actions' to be a list")
pulumi.set(__self__, "actions", actions)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if scope and not isinstance(scope, str):
raise TypeError("Expected argument 'scope' to be a str")
pulumi.set(__self__, "scope", scope)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def actions(self) -> Sequence[str]:
"""
The list of actions that trigger the webhook to post notifications.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
The location of the resource. This cannot be changed after the resource is created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the webhook at the time the operation was called.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def scope(self) -> Optional[str]:
"""
The scope of repositories where the event can be triggered. For example, 'foo:*' means events for all tags under repository 'foo'. 'foo:bar' means events for 'foo:bar' only. 'foo' is equivalent to 'foo:latest'. Empty means all events.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def status(self) -> Optional[str]:
"""
The status of the webhook at the time the operation was called.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetWebhookResult(GetWebhookResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetWebhookResult(
actions=self.actions,
id=self.id,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
scope=self.scope,
status=self.status,
tags=self.tags,
type=self.type)
def get_webhook(registry_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
webhook_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetWebhookResult:
"""
An object that represents a webhook for a container registry.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
:param str webhook_name: The name of the webhook.
"""
__args__ = dict()
__args__['registryName'] = registry_name
__args__['resourceGroupName'] = resource_group_name
__args__['webhookName'] = webhook_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerregistry/v20190501:getWebhook', __args__, opts=opts, typ=GetWebhookResult).value
return AwaitableGetWebhookResult(
actions=__ret__.actions,
id=__ret__.id,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
scope=__ret__.scope,
status=__ret__.status,
tags=__ret__.tags,
type=__ret__.type)
@_utilities.lift_output_func(get_webhook)
def get_webhook_output(registry_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
webhook_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetWebhookResult]:
"""
An object that represents a webhook for a container registry.
:param str registry_name: The name of the container registry.
:param str resource_group_name: The name of the resource group to which the container registry belongs.
:param str webhook_name: The name of the webhook.
"""
...
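# Example usage (a minimal sketch; the registry, resource group and webhook
# names below are hypothetical placeholders):
#
#   result = get_webhook(registry_name="myregistry",
#                        resource_group_name="myresourcegroup",
#                        webhook_name="mywebhook")
#   pulumi.export("webhookStatus", result.status)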
| [
"[email protected]"
] | |
b4cb6b650396f272e17879ab0ae5704357b257f3 | ce564f0a9b6f261e5303779ab95f8c1629487ac7 | /django_mysql_fix/version.py | e7cea81e8496619ab8dc010d38b5a71077b6eb17 | [
"MIT"
] | permissive | frol/django-mysql-fix | 192e334cb94c0fdf14516383022d6c5d4486c1d8 | 96d1e960b49ab686ea6d8d766bb4d86edb806e47 | refs/heads/master | 2021-01-19T14:09:38.956874 | 2014-05-03T16:07:11 | 2014-05-03T16:07:11 | 18,802,306 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | VERSION = (0, 1, 6)
__version__ = '.'.join(unicode(x) for x in VERSION)
| [
"[email protected]"
] | |
5d1ff2249d14c248fe7903d781b51ba405023c40 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-4/a2602090981a65652199423a185e3c2bd8b2c356-<merge_bgp_peer_af_other>-bug.py | 3f4c944a4a6b2f086abda5e8ebe56efc68a702a4 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,936 | py | def merge_bgp_peer_af_other(self, **kwargs):
' merge_bgp_peer_af_other '
module = kwargs['module']
vrf_name = module.params['vrf_name']
af_type = module.params['af_type']
remote_address = module.params['remote_address']
conf_str = (CE_MERGE_BGP_PEER_AF_HEADER % (vrf_name, af_type, remote_address))
cmds = []
advertise_irb = module.params['advertise_irb']
if (advertise_irb != 'no_use'):
conf_str += ('<advertiseIrb>%s</advertiseIrb>' % advertise_irb)
if (advertise_irb == 'true'):
cmd = ('peer %s advertise irb' % remote_address)
else:
cmd = ('undo peer %s advertise irb' % remote_address)
cmds.append(cmd)
advertise_arp = module.params['advertise_arp']
if (advertise_arp != 'no_use'):
conf_str += ('<advertiseArp>%s</advertiseArp>' % advertise_arp)
if (advertise_arp == 'true'):
cmd = ('peer %s advertise arp' % remote_address)
else:
cmd = ('undo peer %s advertise arp' % remote_address)
cmds.append(cmd)
advertise_remote_nexthop = module.params['advertise_remote_nexthop']
if (advertise_remote_nexthop != 'no_use'):
conf_str += ('<advertiseRemoteNexthop>%s</advertiseRemoteNexthop>' % advertise_remote_nexthop)
if (advertise_remote_nexthop == 'true'):
cmd = ('peer %s advertise remote-nexthop' % remote_address)
else:
cmd = ('undo peer %s advertise remote-nexthop' % remote_address)
cmds.append(cmd)
advertise_community = module.params['advertise_community']
if (advertise_community != 'no_use'):
conf_str += ('<advertiseCommunity>%s</advertiseCommunity>' % advertise_community)
if (advertise_community == 'true'):
cmd = ('peer %s advertise-community' % remote_address)
else:
cmd = ('undo peer %s advertise-community' % remote_address)
cmds.append(cmd)
advertise_ext_community = module.params['advertise_ext_community']
if (advertise_ext_community != 'no_use'):
conf_str += ('<advertiseExtCommunity>%s</advertiseExtCommunity>' % advertise_ext_community)
if (advertise_ext_community == 'true'):
cmd = ('peer %s advertise-ext-community' % remote_address)
else:
cmd = ('undo peer %s advertise-ext-community' % remote_address)
cmds.append(cmd)
discard_ext_community = module.params['discard_ext_community']
if (discard_ext_community != 'no_use'):
conf_str += ('<discardExtCommunity>%s</discardExtCommunity>' % discard_ext_community)
if (discard_ext_community == 'true'):
cmd = ('peer %s discard-ext-community' % remote_address)
else:
cmd = ('undo peer %s discard-ext-community' % remote_address)
cmds.append(cmd)
allow_as_loop_enable = module.params['allow_as_loop_enable']
if (allow_as_loop_enable != 'no_use'):
conf_str += ('<allowAsLoopEnable>%s</allowAsLoopEnable>' % allow_as_loop_enable)
if (allow_as_loop_enable == 'true'):
cmd = ('peer %s allow-as-loop' % remote_address)
else:
cmd = ('undo peer %s allow-as-loop' % remote_address)
cmds.append(cmd)
allow_as_loop_limit = module.params['allow_as_loop_limit']
if allow_as_loop_limit:
conf_str += ('<allowAsLoopLimit>%s</allowAsLoopLimit>' % allow_as_loop_limit)
if (allow_as_loop_enable == 'true'):
cmd = ('peer %s allow-as-loop %s' % (remote_address, allow_as_loop_limit))
else:
cmd = ('undo peer %s allow-as-loop' % remote_address)
cmds.append(cmd)
keep_all_routes = module.params['keep_all_routes']
if (keep_all_routes != 'no_use'):
conf_str += ('<keepAllRoutes>%s</keepAllRoutes>' % keep_all_routes)
if (keep_all_routes == 'true'):
cmd = ('peer %s keep-all-routes' % remote_address)
else:
cmd = ('undo peer %s keep-all-routes' % remote_address)
cmds.append(cmd)
nexthop_configure = module.params['nexthop_configure']
if nexthop_configure:
conf_str += ('<nextHopConfigure>%s</nextHopConfigure>' % nexthop_configure)
if (nexthop_configure == 'local'):
cmd = ('peer %s next-hop-local' % remote_address)
cmds.append(cmd)
elif (nexthop_configure == 'invariable'):
cmd = ('peer %s next-hop-invariable' % remote_address)
cmds.append(cmd)
preferred_value = module.params['preferred_value']
if preferred_value:
conf_str += ('<preferredValue>%s</preferredValue>' % preferred_value)
cmd = ('peer %s preferred-value %s' % (remote_address, preferred_value))
cmds.append(cmd)
public_as_only = module.params['public_as_only']
if (public_as_only != 'no_use'):
conf_str += ('<publicAsOnly>%s</publicAsOnly>' % public_as_only)
if (public_as_only == 'true'):
cmd = ('peer %s public-as-only' % remote_address)
else:
cmd = ('undo peer %s public-as-only' % remote_address)
cmds.append(cmd)
public_as_only_force = module.params['public_as_only_force']
if (public_as_only_force != 'no_use'):
conf_str += ('<publicAsOnlyForce>%s</publicAsOnlyForce>' % public_as_only_force)
if (public_as_only_force == 'true'):
cmd = ('peer %s public-as-only force' % remote_address)
else:
cmd = ('undo peer %s public-as-only force' % remote_address)
cmds.append(cmd)
public_as_only_limited = module.params['public_as_only_limited']
if (public_as_only_limited != 'no_use'):
conf_str += ('<publicAsOnlyLimited>%s</publicAsOnlyLimited>' % public_as_only_limited)
if (public_as_only_limited == 'true'):
cmd = ('peer %s public-as-only limited' % remote_address)
else:
cmd = ('undo peer %s public-as-only limited' % remote_address)
cmds.append(cmd)
public_as_only_replace = module.params['public_as_only_replace']
if (public_as_only_replace != 'no_use'):
conf_str += ('<publicAsOnlyReplace>%s</publicAsOnlyReplace>' % public_as_only_replace)
if (public_as_only_replace == 'true'):
cmd = ('peer %s public-as-only force replace' % remote_address)
else:
cmd = ('undo peer %s public-as-only force replace' % remote_address)
cmds.append(cmd)
public_as_only_skip_peer_as = module.params['public_as_only_skip_peer_as']
if (public_as_only_skip_peer_as != 'no_use'):
conf_str += ('<publicAsOnlySkipPeerAs>%s</publicAsOnlySkipPeerAs>' % public_as_only_skip_peer_as)
if (public_as_only_skip_peer_as == 'true'):
cmd = ('peer %s public-as-only force include-peer-as' % remote_address)
else:
cmd = ('undo peer %s public-as-only force include-peer-as' % remote_address)
cmds.append(cmd)
route_limit = module.params['route_limit']
if route_limit:
conf_str += ('<routeLimit>%s</routeLimit>' % route_limit)
cmd = ('peer %s route-limit %s' % (remote_address, route_limit))
cmds.append(cmd)
route_limit_percent = module.params['route_limit_percent']
if route_limit_percent:
conf_str += ('<routeLimitPercent>%s</routeLimitPercent>' % route_limit_percent)
cmd = ('peer %s route-limit %s %s' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
route_limit_type = module.params['route_limit_type']
if route_limit_type:
conf_str += ('<routeLimitType>%s</routeLimitType>' % route_limit_type)
if (route_limit_type == 'alertOnly'):
cmd = ('peer %s route-limit %s %s alert-only' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
elif (route_limit_type == 'idleForever'):
cmd = ('peer %s route-limit %s %s idle-forever' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
elif (route_limit_type == 'idleTimeout'):
cmd = ('peer %s route-limit %s %s idle-timeout' % (remote_address, route_limit, route_limit_percent))
cmds.append(cmd)
route_limit_idle_timeout = module.params['route_limit_idle_timeout']
if route_limit_idle_timeout:
conf_str += ('<routeLimitIdleTimeout>%s</routeLimitIdleTimeout>' % route_limit_idle_timeout)
cmd = ('peer %s route-limit %s %s idle-timeout %s' % (remote_address, route_limit, route_limit_percent, route_limit_idle_timeout))
cmds.append(cmd)
rt_updt_interval = module.params['rt_updt_interval']
if rt_updt_interval:
conf_str += ('<rtUpdtInterval>%s</rtUpdtInterval>' % rt_updt_interval)
cmd = ('peer %s route-update-interval %s' % (remote_address, rt_updt_interval))
cmds.append(cmd)
redirect_ip = module.params['redirect_ip']
if (redirect_ip != 'no_use'):
conf_str += ('<redirectIP>%s</redirectIP>' % redirect_ip)
redirect_ip_validation = module.params['redirect_ip_validation']
if (redirect_ip_validation != 'no_use'):
conf_str += ('<redirectIPVaildation>%s</redirectIPVaildation>' % redirect_ip_validation)
reflect_client = module.params['reflect_client']
if (reflect_client != 'no_use'):
conf_str += ('<reflectClient>%s</reflectClient>' % reflect_client)
if (reflect_client == 'true'):
cmd = ('peer %s reflect-client' % remote_address)
else:
cmd = ('undo peer %s reflect-client' % remote_address)
cmds.append(cmd)
substitute_as_enable = module.params['substitute_as_enable']
if (substitute_as_enable != 'no_use'):
conf_str += ('<substituteAsEnable>%s</substituteAsEnable>' % substitute_as_enable)
if (substitute_as_enable == 'true'):
cmd = ('peer %s substitute-as' % remote_address)
else:
cmd = ('undo peer %s substitute-as' % remote_address)
cmds.append(cmd)
import_rt_policy_name = module.params['import_rt_policy_name']
if import_rt_policy_name:
conf_str += ('<importRtPolicyName>%s</importRtPolicyName>' % import_rt_policy_name)
cmd = ('peer %s route-policy %s import' % (remote_address, import_rt_policy_name))
cmds.append(cmd)
export_rt_policy_name = module.params['export_rt_policy_name']
if export_rt_policy_name:
conf_str += ('<exportRtPolicyName>%s</exportRtPolicyName>' % export_rt_policy_name)
cmd = ('peer %s route-policy %s export' % (remote_address, export_rt_policy_name))
cmds.append(cmd)
import_pref_filt_name = module.params['import_pref_filt_name']
if import_pref_filt_name:
conf_str += ('<importPrefFiltName>%s</importPrefFiltName>' % import_pref_filt_name)
cmd = ('peer %s filter-policy %s import' % (remote_address, import_pref_filt_name))
cmds.append(cmd)
export_pref_filt_name = module.params['export_pref_filt_name']
if export_pref_filt_name:
conf_str += ('<exportPrefFiltName>%s</exportPrefFiltName>' % export_pref_filt_name)
cmd = ('peer %s filter-policy %s export' % (remote_address, export_pref_filt_name))
cmds.append(cmd)
import_as_path_filter = module.params['import_as_path_filter']
if import_as_path_filter:
conf_str += ('<importAsPathFilter>%s</importAsPathFilter>' % import_as_path_filter)
cmd = ('peer %s as-path-filter %s import' % (remote_address, import_as_path_filter))
cmds.append(cmd)
export_as_path_filter = module.params['export_as_path_filter']
if export_as_path_filter:
conf_str += ('<exportAsPathFilter>%s</exportAsPathFilter>' % export_as_path_filter)
cmd = ('peer %s as-path-filter %s export' % (remote_address, export_as_path_filter))
cmds.append(cmd)
import_as_path_name_or_num = module.params['import_as_path_name_or_num']
if import_as_path_name_or_num:
conf_str += ('<importAsPathNameOrNum>%s</importAsPathNameOrNum>' % import_as_path_name_or_num)
cmd = ('peer %s as-path-filter %s import' % (remote_address, import_as_path_name_or_num))
cmds.append(cmd)
export_as_path_name_or_num = module.params['export_as_path_name_or_num']
if export_as_path_name_or_num:
conf_str += ('<exportAsPathNameOrNum>%s</exportAsPathNameOrNum>' % export_as_path_name_or_num)
cmd = ('peer %s as-path-filter %s export' % (remote_address, export_as_path_name_or_num))
cmds.append(cmd)
import_acl_name_or_num = module.params['import_acl_name_or_num']
if import_acl_name_or_num:
conf_str += ('<importAclNameOrNum>%s</importAclNameOrNum>' % import_acl_name_or_num)
cmd = ('peer %s filter-policy %s import' % (remote_address, import_acl_name_or_num))
cmds.append(cmd)
export_acl_name_or_num = module.params['export_acl_name_or_num']
if export_acl_name_or_num:
conf_str += ('<exportAclNameOrNum>%s</exportAclNameOrNum>' % export_acl_name_or_num)
cmd = ('peer %s filter-policy %s export' % (remote_address, export_acl_name_or_num))
cmds.append(cmd)
ipprefix_orf_enable = module.params['ipprefix_orf_enable']
if (ipprefix_orf_enable != 'no_use'):
conf_str += ('<ipprefixOrfEnable>%s</ipprefixOrfEnable>' % ipprefix_orf_enable)
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf ip-prefix' % remote_address)
else:
cmd = ('undo peer %s capability-advertise orf ip-prefix' % remote_address)
cmds.append(cmd)
is_nonstd_ipprefix_mod = module.params['is_nonstd_ipprefix_mod']
if (is_nonstd_ipprefix_mod != 'no_use'):
conf_str += ('<isNonstdIpprefixMod>%s</isNonstdIpprefixMod>' % is_nonstd_ipprefix_mod)
if (is_nonstd_ipprefix_mod == 'true'):
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf non-standard-compatible' % remote_address)
else:
cmd = ('undo peer %s capability-advertise orf non-standard-compatible' % remote_address)
cmds.append(cmd)
else:
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf' % remote_address)
else:
cmd = ('undo peer %s capability-advertise orf' % remote_address)
cmds.append(cmd)
orftype = module.params['orftype']
if orftype:
conf_str += ('<orftype>%s</orftype>' % orftype)
orf_mode = module.params['orf_mode']
if orf_mode:
conf_str += ('<orfMode>%s</orfMode>' % orf_mode)
if (ipprefix_orf_enable == 'true'):
cmd = ('peer %s capability-advertise orf ip-prefix %s' % (remote_address, orf_mode))
else:
cmd = ('undo peer %s capability-advertise orf ip-prefix %s' % (remote_address, orf_mode))
cmds.append(cmd)
soostring = module.params['soostring']
if soostring:
conf_str += ('<soostring>%s</soostring>' % soostring)
cmd = ('peer %s soo %s' % (remote_address, soostring))
cmds.append(cmd)
cmd = ''
default_rt_adv_enable = module.params['default_rt_adv_enable']
if (default_rt_adv_enable != 'no_use'):
conf_str += ('<defaultRtAdvEnable>%s</defaultRtAdvEnable>' % default_rt_adv_enable)
if (default_rt_adv_enable == 'true'):
cmd += ('peer %s default-route-advertise' % remote_address)
else:
cmd += ('undo peer %s default-route-advertise' % remote_address)
cmds.append(cmd)
default_rt_adv_policy = module.params['default_rt_adv_policy']
if default_rt_adv_policy:
conf_str += ('<defaultRtAdvPolicy>%s</defaultRtAdvPolicy>' % default_rt_adv_policy)
cmd = (' route-policy %s' % default_rt_adv_policy)
cmds.append(cmd)
default_rt_match_mode = module.params['default_rt_match_mode']
if default_rt_match_mode:
conf_str += ('<defaultRtMatchMode>%s</defaultRtMatchMode>' % default_rt_match_mode)
if (default_rt_match_mode == 'matchall'):
cmd += ' conditional-route-match-all'
elif (default_rt_match_mode == 'matchany'):
cmd += ' conditional-route-match-any'
if cmd:
cmds.append(cmd)
add_path_mode = module.params['add_path_mode']
if add_path_mode:
conf_str += ('<addPathMode>%s</addPathMode>' % add_path_mode)
if (add_path_mode == 'receive'):
cmd += ' add-path receive'
elif (add_path_mode == 'send'):
cmd += ' add-path send'
elif (add_path_mode == 'both'):
cmd += ' add-path both'
if cmd:
cmds.append(cmd)
adv_add_path_num = module.params['adv_add_path_num']
if adv_add_path_num:
conf_str += ('<advAddPathNum>%s</advAddPathNum>' % adv_add_path_num)
cmd += (' advertise add-path path-number %s' % adv_add_path_num)
if cmd:
cmds.append(cmd)
origin_as_valid = module.params['origin_as_valid']
if (origin_as_valid != 'no_use'):
conf_str += ('<originAsValid>%s</originAsValid>' % origin_as_valid)
vpls_enable = module.params['vpls_enable']
if (vpls_enable != 'no_use'):
conf_str += ('<vplsEnable>%s</vplsEnable>' % vpls_enable)
vpls_ad_disable = module.params['vpls_ad_disable']
if (vpls_ad_disable != 'no_use'):
conf_str += ('<vplsAdDisable>%s</vplsAdDisable>' % vpls_ad_disable)
update_pkt_standard_compatible = module.params['update_pkt_standard_compatible']
if (update_pkt_standard_compatible != 'no_use'):
conf_str += ('<updatePktStandardCompatible>%s</updatePktStandardCompatible>' % update_pkt_standard_compatible)
conf_str += CE_MERGE_BGP_PEER_AF_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if ('<ok/>' not in recv_xml):
module.fail_json(msg='Error: Merge bgp peer address family other failed.')
return cmds | [
"[email protected]"
] | |
df9dd24400578916c3d14c13ccc9926eddfabb48 | 38eb57300418e6f10433630437388f779ce50e09 | /cookie_and_session/app02_session/views.py | 25a4bbc4abf9387fc8de2e70f90c22b5c03e8db7 | [] | no_license | SelfShadows/Django-Flask | f37839f763133f0d62bffad3128171c426a1c038 | 13e32d1c8aac1532b43323e1891c423fe78f2813 | refs/heads/master | 2021-01-04T12:31:18.018508 | 2020-02-14T16:29:27 | 2020-02-14T16:29:27 | 240,550,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,688 | py | from django.shortcuts import render ,redirect
from functools import wraps
from django import views
# Django utility that turns a function decorator into a method decorator
from django.utils.decorators import method_decorator
from app02_session import models
def check_login(func):
@wraps(func) # preserves the wrapped function's metadata
def inner(request, *args, **kwargs):
# read the session
ret = request.session.get("is_login")
# 1. get the random session key from the cookie
# 2. use it to fetch session_data from the database --> decrypt --> deserialize into a dict
# 3. look up is_login in that dict
if ret == "1":
# already logged in: continue to the view
return func(request, *args, **kwargs)
else:
# not logged in: redirect to the login page
next_url = request.path_info
return redirect("/app02/login/?next={}".format(next_url))
return inner
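# Usage sketch: wrap any function-based view that requires a login, e.g.
#   @check_login
#   def profile(request): ...
# (home and index below are decorated exactly this way.)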
def login(request):
if request.method == "POST":
user = request.POST.get("user")
pwd = request.POST.get("pwd")
# pull the next parameter out of the URL
next_url = request.GET.get("next")
# delete all session rows whose expiry date has already passed
request.session.clear_expired()
have_user = models.Person.objects.filter(username=user, password=pwd)
if have_user:
# login succeeded
# tell the browser to store a key-value pair
if next_url:
ret = redirect(next_url)
else:
ret = redirect("/app02/home/")
# set session values
request.session["is_login"] = "1"
request.session["user_id"] = have_user[0].id
# set the session timeout
request.session.set_expiry(5) # expires after 5 seconds
return ret
return render(request, "app02/login.html")
# logout view
def logout(request):
# delete only the session data:
# request.session.delete()
# delete both the session data and the cookie:
request.session.flush()
return redirect("/app02/login/")
@check_login
def home(request):
user_id = request.session.get("user_id")
user_obj = models.Person.objects.filter(id=user_id)
if user_obj:
return render(request, "app02/home.html", {"user_obj": user_obj[0]})
else:
return render(request, "app02/home.html", {"user_obj": "匿名用户"})
@check_login
def index(request):
return render(request, "app02/index.html")
class UserInfo(views.View):
    # Turn the function decorator into a method decorator
@method_decorator(check_login)
def get(self, request):
return render(request, "app02/userinfo.html") | [
"[email protected]"
] | |
e2e081e324e998a37d2a94a4d1659f2fbfec36c3 | dd3b3fc3cbb9a48d5056f39969f3e2be0e6abbaf | /venv/Scripts/pip3-script.py | cb3d85e6d3895a84278dc67a8e5d53ce243a4847 | [] | no_license | Pactortester/QDS_phone | c0c323dd44c22924d36a1c9fe8b13db354192c81 | 9844242e5a71de89c3cb994e70c40d3dfd7b0f35 | refs/heads/master | 2020-04-10T16:19:00.264023 | 2019-04-03T09:15:48 | 2019-04-03T09:15:48 | 161,141,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 387 | py | #!G:\QDS_phone\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
6f42046e26a53d45a6b0e199f1b66b160ac34a3f | 99d7765da35926279c4a4fd7313d55908786f4b8 | /0/2/2739/2739.py | 32df89b38143b4cce88cb8125277af2ebf5543fb | [
"MIT"
] | permissive | chr0m3/boj-codes | b8294c5d4d10a5af25b5276427bccd74d0866ef5 | d71d0a22d0a3ae62c225f382442461275f56fe8f | refs/heads/master | 2021-08-16T15:24:57.733088 | 2021-03-22T13:13:10 | 2021-03-22T13:13:10 | 91,523,558 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | a = input()
for i in range(0, 9):
print("%d * %d = %d" % (int(a), i + 1, int(a) * (i + 1)))
| [
"[email protected]"
] | |
9d2cd1f61430081aa4a65d8e29b28e23f51b088f | 85f6de6e3ef680cd717312233fd03c636c606550 | /src/two/rolling_a_dice.py | faf4234c08ca6aa9dc9b3cb20192a6fdd631a5dc | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Guillermogsjc/dissecting-reinforcement-learning | f8956455ffda22445ecc11fc6938da40ed4948e2 | 8a2751efa6d4a733df81c272c503b8061c70c04f | refs/heads/master | 2021-01-11T20:41:02.216522 | 2017-01-15T11:32:27 | 2017-01-15T11:32:27 | 79,168,192 | 1 | 0 | null | 2017-01-16T23:14:54 | 2017-01-16T23:14:53 | null | UTF-8 | Python | false | false | 611 | py | import numpy as np
# Throwing a die N times and evaluating the expectation
dice = np.random.randint(low=1, high=7, size=3)
print("Expectation (3 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=10)
print("Expectation (10 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=100)
print("Expectation (100 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=1000)
print("Expectation (1000 times): " + str(np.mean(dice)))
dice = np.random.randint(low=1, high=7, size=100000)
print("Expectation (100000 times): " + str(np.mean(dice)))
| [
"[email protected]"
] | |
fac85c5c169eaf142355c0655ac86fcd5f74fc09 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/surrounded_20200617223518.py | 233debe26db46593e2dfe08e99e70eb47ac5cf87 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,136 | py | def surronded(board):
    # DFS over 'O' cells, tracking a per-cell state:
    # 0 = untouched, 1 = in progress / visited, 2 = finished (unused below)
rows = len(board)
if rows == 0:
return
cols = len(board[0])
if cols == 0:
return
state = [[0]* cols for _ in range(rows)]
    def canReachOutside(x, y, pending):
        pending.append((x, y))  # record every cell of the current region
        state[x][y] = 1
canReach = False
directions = [(1,0),(-1,0),(0,1),(0,-1)]
for dx,dy in directions:
nextX,nextY = dx+x,dy+y
if nextX < 0 or nextX >= rows or nextY < 0 or nextY >= cols:
canReach = True
continue
if board[nextX][nextY] == 'O' and state[nextX][nextY] == 0:
state[nextX][nextY] = 1
                canReach |= canReachOutside(nextX, nextY, pending)
return canReach
for x in range(rows):
for y in range(cols):
            if board[x][y] == 'O' and state[x][y] == 0:
                pending = []
                if canReachOutside(x, y, pending):
                    # region touches the border, so its 'O's are kept
                    pass
                else:
                    # fully surrounded region: capture it by flipping to 'X'
                    for px, py in pending:
                        board[px][py] = 'X'
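# Illustrative check (assumed board, LeetCode 130 style):
#   b = [list("XXXX"), list("XOOX"), list("XXOX"), list("XOXX")]
#   surronded(b)
#   # the inner region (1,1)-(1,2)-(2,2) is flipped to 'X'; the 'O' at (3,1)
#   # sits on the border, reaches outside, and is kept.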
| [
"[email protected]"
] | |
f7704d11de6500356f5a0264aa2a05b0534f42a0 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /spaceopt/gp_utils.py | dabf02d461833af563929d3c7310be1c9a08d714 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 8,808 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Preparing the GP utility functions for evaluting the search space scores."""
from typing import Any, Callable, Dict
import jax
import jax.numpy as jnp
import numpy as np
import sklearn
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process import kernels
from tensorflow_probability.substrates import jax as tfp
PRECISION = jax.lax.Precision.DEFAULT
def sqdist(x1, x2=None, precision=PRECISION):
"""Computes the matrix of squared distances between two tensors.
Args:
x1: (n, ...) shaped tensor.
x2: (m, ...) shaped tensor where x1.shape[1:] and x2.shape[1:] are assumed
to be compatible.
precision: argument for jax functions controlling the tradeoff between
accuracy and speed.
Returns:
out: (n, m) shaped array of squared distances between x1 and x2.
"""
if x2 is None:
x2 = x1
sum_axis = list(range(1, x1.ndim))
out = jnp.float32(-2) * jnp.tensordot(
x1, x2, (sum_axis, sum_axis), precision=precision)
out += jnp.sum(x1**2, axis=sum_axis)[:, jnp.newaxis]
out += jnp.sum(x2**2, axis=sum_axis)[jnp.newaxis]
return out
def matern_5_2(x, y, length_scale):
dists = jnp.sqrt(sqdist(x / length_scale, y / length_scale))
k = dists * jnp.sqrt(5.)
k = (1. + k + k ** 2 / 3.0) * jnp.exp(-k)
return k
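# Illustrative shapes (not executed here): for x1 = jnp.ones((3, 2)) and
# x2 = jnp.zeros((4, 2)), sqdist(x1, x2) is a (3, 4) array of 2.0 values,
# via the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b, and
# matern_5_2(x1, x2, 1.0) is the corresponding (3, 4) Matern-5/2 kernel.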
PARAMS_BOUNDS = {
'amplitude': (0.05, 2.),
'noise': (0.0005, .1),
'lengthscale': (0.005, 20.)
}
N_RESTARTS_OPTIMIZER = 10
def cov_function_sklearn(params, nu = 5/2):
"""Generates a default covariance function.
Args:
params: A dictionary with GP hyperparameters.
nu: Degree of the matern kernel.
Returns:
cov_fun: an ARD Matern covariance function with diagonal noise for
numerical stability.
"""
amplitude = params['amplitude']
noise = params['noise']
lengthscale = params['lengthscale'].flatten()
amplitude_bounds = PARAMS_BOUNDS['amplitude']
lengthscale_bounds = PARAMS_BOUNDS['lengthscale']
noise_bounds = PARAMS_BOUNDS['noise']
cov_fun = kernels.ConstantKernel(
amplitude, constant_value_bounds=amplitude_bounds) * kernels.Matern(
lengthscale, nu=nu,
length_scale_bounds=lengthscale_bounds) + kernels.WhiteKernel(
noise, noise_level_bounds=noise_bounds)
return cov_fun
def cov_function_jax(params, x, y=None, add_noise=False):
"""Evaluates the default matern 5/2 covariance function."""
amplitude = params['amplitude']
noise = params['noise']
lengthscale = params['lengthscale'].flatten()
if y is None:
y = x
add_noise = True
cov = amplitude * matern_5_2(x, y, lengthscale)
if add_noise:
cov += np.eye(cov.shape[0]) * noise**2
return cov
def extract_params_from_sklearn_gp(gaussian_process):
"""Extracts parameter values from the fitted sklearn gp object.
Following https://arxiv.org/pdf/1206.2944.pdf we assume an ARD Matern 5/2
kernel with observation noise. The input to this function is a fitted sklearn
GP object and the output is a dictionary including the values of learned
hyperparameters and GP statistics.
Args:
gaussian_process: GP object from sklearn implementation.
Returns:
Dictionary of learned GP hyperparameters and statistics from the sklearn GP
implementation.
"""
kernel = gaussian_process.kernel_
assert isinstance(kernel, sklearn.gaussian_process.kernels.Sum)
matern_kernel = kernel.k1
noise_kernel = kernel.k2
assert isinstance(matern_kernel, sklearn.gaussian_process.kernels.Product)
assert isinstance(noise_kernel, sklearn.gaussian_process.kernels.WhiteKernel)
params = {
'noise': noise_kernel.noise_level,
'lengthscale': matern_kernel.k2.length_scale,
'amplitude': matern_kernel.k1.constant_value,
'l_': gaussian_process.L_,
# pylint: disable=protected-access
'y_train_std_': gaussian_process._y_train_std,
'y_train_mean_': gaussian_process._y_train_mean,
# pylint: enable=protected-access
'alpha_': gaussian_process.alpha_
}
return params
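# Kernel layout assumed by the extraction above (built in cov_function_sklearn):
#   kernel_ = (ConstantKernel * Matern) + WhiteKernel
# hence kernel_.k1.k1.constant_value is the amplitude, kernel_.k1.k2.length_scale
# the ARD lengthscales, and kernel_.k2.noise_level the observation noise.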
class GPUtils:
"""Class for GP utilities."""
def __init__(self,
cov_fun = None,
gp_noise_eps = 1e-5):
"""Initialize the GP class."""
self.cov_fun = cov_fun
self.gp_noise_eps = gp_noise_eps
def fit_gp(self,
x_obs,
y_obs,
params,
steps = 1000):
"""Fit a GP to the observed data and return the optimized params.
Args:
x_obs: (n, d) shaped array of n observed x-locations in dimension d.
y_obs: (n, 1) shaped array of objective values at x_obs.
params: A dictionary of model hyperparameters.
steps: Number of optimization steps.
Note that this argument is ignored for sklearn GP, however might be
included for other GP backends.
Returns:
Dictionary of learned parameters from the sklearn GP implementation.
"""
del steps
if self.cov_fun is None:
self.cov_fun = cov_function_sklearn(params)
gaussian_process = GaussianProcessRegressor(
kernel=self.cov_fun,
alpha=self.gp_noise_eps,
n_restarts_optimizer=N_RESTARTS_OPTIMIZER,
optimizer='fmin_l_bfgs_b')
gaussian_process.fit(np.array(x_obs), np.array(y_obs))
self.gaussian_process = gaussian_process
params = extract_params_from_sklearn_gp(gaussian_process)
return params
def posterior_mean_cov(self, params, x_obs,
y_obs, x_test):
"""Evaluate the posterior mean and cov of the test x-locations.
Args:
params: Dictionary of learned parameters from the sklearn GP
implementation.
x_obs: (n, d) shaped array of n observed x-locations in dimension d.
y_obs: (n, 1) shaped array of objective values at x_obs.
Note that this argument is ignored for sklearn GP since we alternatively
use the already calculated statistics from sklearn GP object, however
might be included for other GP backends.
x_test: (m, d) shaped array of m test x-locations in dimension d.
Returns:
mu: (m, 1) shaped array of mean at x_test.
cov: (m, m) shaped array of covariance at x_test.
"""
del y_obs
l_ = params['l_']
y_train_std_ = params['y_train_std_']
y_train_mean_ = params['y_train_mean_']
alpha_ = params['alpha_']
cross_cov = cov_function_jax(params, x_test, x_obs)
mu = cross_cov @ alpha_
mu = y_train_std_ * mu + y_train_mean_
v = jax.scipy.linalg.solve_triangular(l_, cross_cov.T, lower=True)
other_cov = cov_function_jax(params, x_test)
other_cov += jnp.eye(other_cov.shape[0]) * self.gp_noise_eps
cov = (other_cov - jnp.dot(v.T, v))
cov = jnp.outer(cov, y_train_std_ ** 2).reshape(*cov.shape, -1)
if cov.shape[2] == 1:
cov = jnp.squeeze(cov, axis=2)
return mu, cov
def draw_gp_samples(self,
key,
mu,
cov,
num_samples = 1,
method = 'cholesky',
tol = 1e-4):
"""Draw multivariate-normal samples given mu and cov.
Args:
key: a jax random.PRNGKey.
mu: (m, 1) shaped array of mean values.
cov: (m, m) shaped array of covariance values.
num_samples: number of samples.
method: method of sampling from 'own', 'cholesky', 'svd' and 'tfp'.
tol: additional tolerance for numerical stability issue.
Returns:
samples: (num_samples, m) shaped array of drawn samples.
"""
if (method == 'cholesky') or (method == 'svd'):
samples = jax.random.multivariate_normal(
key, mu.T, cov, shape=(num_samples,), method=method)
elif method == 'own':
y_rand = jax.random.normal(key, (num_samples, cov.shape[0]))
chol = jax.scipy.linalg.cholesky(
cov + jnp.eye(cov.shape[0]) * tol, lower=True)
samples = jnp.dot(y_rand, chol) + mu.T
elif method == 'tfp':
tfd = tfp.distributions
mvn = tfd.MultivariateNormalFullCovariance(
loc=mu.flatten(), covariance_matrix=cov)
samples = mvn.sample(num_samples, key)
else:
raise ValueError('Accepted methods include own, cholesky, svd and tfp.')
return samples
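# Minimal end-to-end sketch (assumed shapes: x_obs (n, d), y_obs (n, 1)):
#   gp = GPUtils()
#   params = {'amplitude': 1.0, 'noise': 0.01, 'lengthscale': np.ones((1, d))}
#   params = gp.fit_gp(x_obs, y_obs, params)
#   mu, cov = gp.posterior_mean_cov(params, x_obs, y_obs, x_test)
#   samples = gp.draw_gp_samples(jax.random.PRNGKey(0), mu, cov, num_samples=5)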
| [
"[email protected]"
] | |
d662e3cccc6393bf07124edfdf202bfc54925ebe | 7cf29923d278c5b934a40de216ac606c25c8a5eb | /wheelcms_axle/translate.py | 4f0f0c74ba26a7823c2018014ad16b58ddbffc3f | [
"BSD-2-Clause"
] | permissive | wheelcms/wheelcms_axle | 1df024f75d17544a575953359e3cc9a4ab56d93c | b5916b555f37b7baafdf08fd56b5b985688df9d0 | refs/heads/master | 2020-04-05T22:43:04.176353 | 2015-04-05T10:53:42 | 2015-04-05T10:53:42 | 7,800,085 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,095 | py | from django.conf import settings
any_lang = ('any', 'Any')
def languages():
languages = tuple(settings.CONTENT_LANGUAGES)
if any_lang not in languages:
languages = languages + (any_lang, )
return languages
def fallback_languages(language):
""" given a language, provide a list of alternatives, prioritized """
langs = [language]
if language != any_lang[0]:
langs.append(any_lang[0])
return langs
def language_slug(slugs, slug, language):
"""
slugs is a mapping of lang->slug,
slug is a default slug,
Try to get the appropriate slug from the mapping first,
else use the provided slug. If neither are present, return
*any* slug from the mapping
(XXX we might try settings.LANGUAGE first)
"""
lslug = slugs.get(language, slug)
if lslug is None and language == any_lang[0]:
## Use fallback? XXX
return slugs.values()[0] # any
if lslug is None:
return slugs.values()[0] # any
## may still be None, let caller fail, for now
return lslug
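# Example (illustrative): with slugs = {"en": "home", "nl": "thuis"}
#   language_slug(slugs, None, "en")   -> "home"
#   language_slug(slugs, None, "any")  -> some slug from the mapping
#   language_slug(slugs, None, "fr")   -> some slug from the mapping, since
#                                         lslug is None and "fr" != "any"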
| [
"[email protected]"
] | |
fb48fd9656915149d8133355706be99ed2db0469 | a31de016611f3b4efc7a576e7113cad1a738419b | /_string_monster2.py | ba71783722b858478094721a871a759c7c6dd5c1 | [] | no_license | Ing-Josef-Klotzner/python | 9d4044d632672fff966b28ab80e1ef77763c78f5 | 3913729d7d6e1b7ac72b46db7b06ca0c58c8a608 | refs/heads/master | 2022-12-09T01:40:52.275592 | 2022-12-01T22:46:43 | 2022-12-01T22:46:43 | 189,040,355 | 0 | 0 | null | 2022-12-01T19:52:37 | 2019-05-28T14:05:16 | Python | UTF-8 | Python | false | false | 1,363 | py | #!/usr/bin/python3
from sys import stdin
def match (ssof, ss):
if ss == "": return True
#print (ssof, ss, end = " ")
    for st in ssof:
        if ss.startswith (st):
            # backtrack: if this prefix choice fails, try the next string
            if match (ssof - {st}, ss [len (st):]):
                return True
    return False
# this works for the judge's testcases, because the floor strings appear
# in order inside the sleepy string (hackerearth testcases); it
# fails for the sample test case, where the sleepy string's chars are scrambled
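# e.g. floor strings {"ab"}, sleepy string "ba": the characters agree as a
# multiset, but match() only finds ordered concatenations, so it answers NO.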
def main ():
read = stdin.readline
t = int (read ())
for t_ in range (t):
n = int (read ())
sof = [] # list of strings on floor
lns = [] # list of the string lengths
for n_ in range (n):
s = read ().rstrip ()
sof.append (s)
lns.append (len (s))
ss = read ().rstrip () # sleepy string
lnss = len (ss)
mnl = min (lns)
mxl = max (lns)
justone = 0
allother_max = 0
for n_ in range (n):
if lns [n_] == mnl: justone += 1
elif lns [n_] == mxl: allother_max += 1
if lnss < mnl or lnss > mnl and lnss < 2 * mnl or mnl == mxl and lnss % mnl or justone == 1 and allother_max == n - 1 and lnss % mxl not in {0, mnl}:
print ("NO")
continue
ssof = set (sof)
print ("YES" if match (ssof, ss) else "NO")
if __name__ == "__main__": main ()
| [
"[email protected]"
] | |
1f3f8ad62b3bff4ac0821b0fc51593df8ce0d5ce | c61c9bedba1968bfaf571ac3996b696fc35890a6 | /Chapter12/has_match.py | 00b6ca1068d542e225e1be731b69d6152b593ec3 | [] | no_license | ArunRamachandran/ThinkPython-Solutions | 497b3dbdeba1c64924fe1d9aa24204a9ca552c5b | 1a0872efd169e5d39b25134960168e3f09ffdc99 | refs/heads/master | 2020-04-01T10:23:20.255132 | 2014-11-07T17:04:52 | 2014-11-07T17:04:52 | 25,806,318 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | # has_match takes two sequences, t1 and t2, and returns True, if there is
# an index i such that t1[i] == t2[i]
def has_match(t1,t2):
	for x, y in zip(t1, t2):
		if x == y:
			return True
	# only report failure after every index has been checked
	return False
t1 = "banana"
t2 = "sequence"
print "Given sequences are : "
print t1
print t2
case = has_match(t1,t2)
if case:
	print "Yeah..!! Two sequences have a matching index "
else:
	print "Nope... It doesn't have a matching index !! "
| [
"[email protected]"
] | |
8f18a7a3cb0b452be92e2c21ca740144639a7e69 | 7e4a1838dbcbe0526f20b4b49f88a3f213dbc712 | /npcaller/fasta.py | 7d1d78befe1990ff329540e7f2e2e5f87acb256e | [
"MIT"
] | permissive | grst/nanopore_pkg | c5c8ee940ddd9218c08846ba5e5884c697914ca6 | e13ccfae0be79f23ae3270b09744726504b0e58f | refs/heads/master | 2023-04-02T14:38:52.410352 | 2020-11-06T19:34:37 | 2020-11-06T19:34:37 | 48,172,418 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,569 | py | """
Since skbio and Biopython are overkill and slightly too complicated most of the time,
I came up with this really simple fasta-io class.
"""
from itertools import groupby
class FastaReader(object):
def __init__(self, file):
if not hasattr(file, 'read'):
self.file = open(file, 'r')
else:
self.file = file
def get_entries(self):
"""
Get the next Entry from the fasta file.
Returns: Generator, which yields (header, sequence) tuples
"""
for isheader, group in groupby(self.file, lambda line: line[0] == ">"):
if isheader:
header = next(group)[1:]
else:
seq = "".join(line.strip() for line in group)
yield header, seq
def close(self):
self.file.close()
class FastaWriter(object):
"""
Very simple fasta file format writer.
"""
SPLIT = 80
def __init__(self, file):
if not hasattr(file, 'write'):
self.file = open(file, 'w')
else:
self.file = file
def write_entry(self, header, sequence):
"""
Write Entry to File
Args:
header: >sequence_header
sequence: ACTGATT...
"""
sequence = [sequence[i:i+self.SPLIT] for i in range(0, len(sequence), self.SPLIT)]
self.file.write(">{0}\n".format(header))
for s in sequence:
self.file.write(s + "\n")
def flush(self):
self.file.flush()
def close(self):
self.file.close()
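# Minimal usage sketch (hypothetical file names):
#   reader = FastaReader("in.fa")
#   writer = FastaWriter("out.fa")
#   for header, seq in reader.get_entries():
#       writer.write_entry(header.strip(), seq)  # header keeps its newline
#   reader.close(); writer.close()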
| [
"[email protected]"
] | |
fc77466e30f68146a40c8d3ba3b858f15859ddb5 | 19ddab74600f71700a6b693281d0180d5271f295 | /程序员面试金典/03_03_堆盘子.py | 2f96f3b2e8fb699bf5461a949729ba6f932d252c | [] | no_license | zhulf0804/Coding.Python | 4d55a430da1a8077c81feba65c13ac654aaf094a | 46ab03e23d15ebd5434ef4dd5ae99130000b00a5 | refs/heads/master | 2022-09-14T18:40:59.880941 | 2022-08-20T08:25:51 | 2022-08-20T08:25:51 | 213,113,482 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,000 | py | class StackOfPlates:
def __init__(self, cap: int):
self.stack = []
self.cap = cap
def push(self, val: int) -> None:
if self.cap == 0:
return
if len(self.stack) == 0 or len(self.stack[-1]) == self.cap:
self.stack.append([])
self.stack[-1].append(val)
def pop(self) -> int:
if self.cap == 0 or len(self.stack) == 0:
return -1
val = self.stack[-1].pop()
if len(self.stack[-1]) == 0:
self.stack = self.stack[:-1]
return val
def popAt(self, index: int) -> int:
if self.cap == 0 or index >= len(self.stack):
return -1
val = self.stack[index].pop()
if len(self.stack[index]) == 0:
self.stack = self.stack[:index] + self.stack[index+1:]
return val
# Your StackOfPlates object will be instantiated and called as such:
# obj = StackOfPlates(cap)
# obj.push(val)
# param_2 = obj.pop()
# param_3 = obj.popAt(index)
 | [
"[email protected]"
] | |
c4fd6afe113c170e2b3985c756cac05390668ae8 | e04dbc32247accf073e3089ed4013427ad182c7c | /hhkb2020/C TLE.py | 61c4d78700c4375a274fc85a2aa4fa2d73278a89 | [] | no_license | twobooks/atcoder_training | 9deb237aed7d9de573c1134a858e96243fb73ca0 | aa81799ec87cc9c9d76de85c55e99ad5fa7676b5 | refs/heads/master | 2021-10-28T06:33:19.459975 | 2021-10-20T14:16:57 | 2021-10-20T14:16:57 | 233,233,854 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py | import numpy as np # np.lcm(),np.gcd()
N = int(input())
arrP = np.array(input().split(),dtype=np.int64)
arrAll = np.arange(200000+1,dtype=np.int64)
mask = np.ones(200000+1, dtype=bool)  # True = value not seen in P yet
for p in arrP:
mask[p] = False
print(arrAll[mask][0])  # smallest value in [0, 200000] missing from P
 | [
"[email protected]"
] | |
27b8f49cb7a0e85b1fe35959e45a5d9c84dcb57b | dfb53581b4e6dbdc8e3789ea2678de1e1c4b5962 | /Django/mydjango01/news/views.py | 21a263f4be374c6a40d7fe19b8fd65329d2cf18d | [] | no_license | biabulinxi/Python-ML-DL | 7eff6d6898d72f00575045c5aa2acac45b4b0b82 | 217d594a3c0cba1e52550f74d100cc5023fb415b | refs/heads/master | 2020-06-01T09:13:17.314121 | 2019-06-08T03:59:36 | 2019-06-08T03:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 168 | py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    return HttpResponse("This is the news index page")
| [
"[email protected]"
] | |
3b199477395e73ead41b6374f4f1e0d538de6b1a | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /homophonous_logography/neural/transformer_model.py | 9264e7380f16b34f04cdfb65679049c04562b23b | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 21,306 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sequence-to-sequence transformer model.
Loosely based on:
https://blog.tensorflow.org/2019/05/transformer-chatbot-tutorial-with-tensorflow-2.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import numpy as np
import tensorflow as tf # tf
import homophonous_logography.neural.corpus as data
import homophonous_logography.neural.utils as utils
tf.config.run_functions_eagerly(False)
tf.compat.v1.disable_eager_execution()
def _create_padding_mask(x):
mask = tf.cast(tf.math.equal(x, 0), tf.float32)
# (batch_size, 1, 1, sequence length)
return mask[:, tf.newaxis, tf.newaxis, :]
def _create_look_ahead_mask(x):
seq_len = tf.shape(x)[1]
look_ahead_mask = 1 - tf.linalg.band_part(tf.ones((seq_len, seq_len)), -1, 0)
padding_mask = _create_padding_mask(x)
return tf.maximum(look_ahead_mask, padding_mask)
def _scaled_dot_product_attention(query, key, value, mask):
"""Actual attention function using dot product."""
matmul_qk = tf.matmul(query, key, transpose_b=True)
depth = tf.cast(tf.shape(key)[-1], tf.float32)
logits = matmul_qk / tf.math.sqrt(depth)
# add the mask zero out padding tokens.
if mask is not None:
logits += (mask * -1e9)
attention_weights = tf.nn.softmax(logits, axis=-1)
return tf.matmul(attention_weights, value), attention_weights
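# Shape sketch (illustrative): query (..., seq_q, depth) and key/value
# (..., seq_k, depth) yield output (..., seq_q, depth) plus attention_weights
# (..., seq_q, seq_k); positions where mask == 1 have -1e9 added to their
# logits and therefore receive ~zero attention weight.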
class MultiHeadAttention(tf.keras.layers.Layer):
"""Multi-head attention implementation."""
def __init__(self, d_model, num_heads, name="multi_head_attention"):
super(MultiHeadAttention, self).__init__(name=name)
self.num_heads = num_heads
self.d_model = d_model
assert d_model % self.num_heads == 0
self.depth = d_model // self.num_heads
self.query_dense = tf.keras.layers.Dense(units=d_model)
self.key_dense = tf.keras.layers.Dense(units=d_model)
self.value_dense = tf.keras.layers.Dense(units=d_model)
self.dense = tf.keras.layers.Dense(units=d_model)
def split_heads(self, inputs, batch_size):
inputs = tf.reshape(
inputs, shape=(batch_size, -1, self.num_heads, self.depth))
return tf.transpose(inputs, perm=[0, 2, 1, 3])
def call(self, inputs):
query, key, value, mask = inputs["query"], inputs["key"], inputs[
"value"], inputs["mask"]
batch_size = tf.shape(query)[0]
# linear layers
query = self.query_dense(query)
key = self.key_dense(key)
value = self.value_dense(value)
# split heads
query = self.split_heads(query, batch_size)
key = self.split_heads(key, batch_size)
value = self.split_heads(value, batch_size)
scaled_attention, attention_weights = _scaled_dot_product_attention(
query, key, value, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])
concat_attention = tf.reshape(scaled_attention,
(batch_size, -1, self.d_model))
outputs = self.dense(concat_attention)
return outputs, attention_weights
class PositionalEncoding(tf.keras.layers.Layer):
"""Trigonometric positional encoding."""
def __init__(self, position, d_model):
super(PositionalEncoding, self).__init__()
self.pos_encoding = self.positional_encoding(position, d_model)
def get_angles(self, position, i, d_model):
angles = 1 / tf.pow(10000, (2 * (i // 2)) / tf.cast(d_model, tf.float32))
return position * angles
def positional_encoding(self, position, d_model):
angle_rads = self.get_angles(
position=tf.range(position, dtype=tf.float32)[:, tf.newaxis],
i=tf.range(d_model, dtype=tf.float32)[tf.newaxis, :],
d_model=d_model)
# apply sin to even index in the array
sines = tf.math.sin(angle_rads[:, 0::2])
# apply cos to odd index in the array
cosines = tf.math.cos(angle_rads[:, 1::2])
pos_encoding = tf.concat([sines, cosines], axis=-1)
pos_encoding = pos_encoding[tf.newaxis, Ellipsis]
return tf.cast(pos_encoding, tf.float32)
def call(self, inputs):
return inputs + self.pos_encoding[:, :tf.shape(inputs)[1], :]
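# The encoding above is the standard sinusoidal scheme,
#   PE(pos, 2i)   = sin(pos / 10000**(2i / d_model))
#   PE(pos, 2i+1) = cos(pos / 10000**(2i / d_model))
# except that the sine and cosine halves are concatenated along the feature
# axis here instead of interleaved; the frequency content is the same.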
def _encoder_layer(units, d_model, num_heads, dropout, name="encoder_layer"):
"""One layer of the encoder."""
inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
attention, _ = MultiHeadAttention(
d_model, num_heads, name="attention")({
"query": inputs,
"key": inputs,
"value": inputs,
"mask": padding_mask
})
attention = tf.keras.layers.Dropout(rate=dropout)(attention)
attention = tf.keras.layers.LayerNormalization(
epsilon=1e-6)(inputs + attention)
outputs = tf.keras.layers.Dense(units=units, activation="relu")(attention)
outputs = tf.keras.layers.Dense(units=d_model)(outputs)
outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
outputs = tf.keras.layers.LayerNormalization(
epsilon=1e-6)(attention + outputs)
return tf.keras.Model(
inputs=[inputs, padding_mask], outputs=outputs, name=name)
# Limit the lengths of input sequences.
_MAX_SEQUENCE_LENGTH = 500
def _encoder(vocab_size,
num_layers,
units,
d_model,
num_heads,
dropout,
name="encoder"):
"""Encoder component."""
inputs = tf.keras.Input(shape=(None,), name="inputs")
padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
embeddings = PositionalEncoding(_MAX_SEQUENCE_LENGTH, d_model)(embeddings)
outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)
for i in range(num_layers):
outputs = _encoder_layer(
units=units,
d_model=d_model,
num_heads=num_heads,
dropout=dropout,
name="encoder_layer_{}".format(i),
)([outputs, padding_mask])
return tf.keras.Model(
inputs=[inputs, padding_mask], outputs=outputs, name=name)
def _decoder_layer(units, d_model, num_heads, dropout, name="decoder_layer"):
"""Single decoder layer."""
inputs = tf.keras.Input(shape=(None, d_model), name="inputs")
enc_outputs = tf.keras.Input(shape=(None, d_model), name="encoder_outputs")
look_ahead_mask = tf.keras.Input(
shape=(1, None, None), name="look_ahead_mask")
padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
attention1, attention_weights_block1 = MultiHeadAttention(
d_model, num_heads, name="attention_1")(inputs={
"query": inputs,
"key": inputs,
"value": inputs,
"mask": look_ahead_mask
})
attention1 = tf.keras.layers.LayerNormalization(
epsilon=1e-6)(attention1 + inputs)
attention2, attention_weights_block2 = MultiHeadAttention(
d_model, num_heads, name="attention_2")(inputs={
"query": attention1,
"key": enc_outputs,
"value": enc_outputs,
"mask": padding_mask
})
attention2 = tf.keras.layers.Dropout(rate=dropout)(attention2)
attention2 = tf.keras.layers.LayerNormalization(
epsilon=1e-6)(attention2 + attention1)
outputs = tf.keras.layers.Dense(units=units, activation="relu")(attention2)
outputs = tf.keras.layers.Dense(units=d_model)(outputs)
outputs = tf.keras.layers.Dropout(rate=dropout)(outputs)
outputs = tf.keras.layers.LayerNormalization(
epsilon=1e-6)(outputs + attention2)
return tf.keras.Model(
inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],
outputs=[outputs, attention_weights_block1, attention_weights_block2],
name=name)
def _decoder(vocab_size,
num_layers,
units,
d_model,
num_heads,
dropout,
name="decoder"):
"""Decoder component."""
inputs = tf.keras.Input(shape=(None,), name="inputs")
enc_outputs = tf.keras.Input(shape=(None, d_model), name="encoder_outputs")
look_ahead_mask = tf.keras.Input(
shape=(1, None, None), name="look_ahead_mask")
padding_mask = tf.keras.Input(shape=(1, 1, None), name="padding_mask")
embeddings = tf.keras.layers.Embedding(vocab_size, d_model)(inputs)
embeddings *= tf.math.sqrt(tf.cast(d_model, tf.float32))
embeddings = PositionalEncoding(_MAX_SEQUENCE_LENGTH, d_model)(embeddings)
outputs = tf.keras.layers.Dropout(rate=dropout)(embeddings)
attention_weights = {}
for i in range(num_layers):
outputs, attn_w_block1, attn_w_block2 = _decoder_layer(
units=units,
d_model=d_model,
num_heads=num_heads,
dropout=dropout,
name="decoder_layer_{}".format(i),
)(inputs=[outputs, enc_outputs, look_ahead_mask, padding_mask])
attention_weights["decoder_layer{}_block1".format(i+1)] = attn_w_block1
attention_weights["decoder_layer{}_block2".format(i+1)] = attn_w_block2
return tf.keras.Model(
inputs=[inputs, enc_outputs, look_ahead_mask, padding_mask],
outputs=[outputs, attention_weights],
name=name)
def _transformer(input_vocab_size,
target_vocab_size,
num_layers,
units,
d_model,
num_heads,
dropout,
name="transformer"):
"""Transformer network."""
inputs = tf.keras.Input(shape=(None,), name="inputs")
dec_inputs = tf.keras.Input(shape=(None,), name="dec_inputs")
enc_padding_mask = tf.keras.layers.Lambda(
_create_padding_mask, output_shape=(1, 1, None),
name="enc_padding_mask")(inputs)
# mask the future tokens for decoder inputs at the 1st attention block
look_ahead_mask = tf.keras.layers.Lambda(
_create_look_ahead_mask,
output_shape=(1, None, None),
name="look_ahead_mask")(dec_inputs)
# mask the encoder outputs for the 2nd attention block
dec_padding_mask = tf.keras.layers.Lambda(
_create_padding_mask, output_shape=(1, 1, None),
name="dec_padding_mask")(inputs)
enc_outputs = _encoder(
vocab_size=input_vocab_size,
num_layers=num_layers,
units=units,
d_model=d_model,
num_heads=num_heads,
dropout=dropout,
)(inputs=[inputs, enc_padding_mask])
dec_outputs, attention_weights = _decoder(
vocab_size=target_vocab_size,
num_layers=num_layers,
units=units,
d_model=d_model,
num_heads=num_heads,
dropout=dropout,
)(inputs=[dec_inputs, enc_outputs, look_ahead_mask, dec_padding_mask])
outputs = tf.keras.layers.Dense(units=target_vocab_size, name="outputs")(
dec_outputs)
model = tf.keras.Model(inputs=[inputs, dec_inputs],
outputs=[outputs, attention_weights], name=name)
model.summary()
return model
class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Learning rate schedule."""
def __init__(self, d_model, warmup_steps=4000):
super(CustomSchedule, self).__init__()
self.d_model = d_model
self.d_model = tf.cast(self.d_model, tf.float32)
self.warmup_steps = warmup_steps
def __call__(self, step):
arg1 = tf.math.rsqrt(step)
arg2 = step * (self.warmup_steps ** -1.5)
return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)
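# This is the schedule from "Attention Is All You Need":
#   lr(step) = d_model**-0.5 * min(step**-0.5, step * warmup_steps**-1.5)
# i.e. a linear warmup over the first warmup_steps updates followed by
# inverse-square-root decay.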
_TRAIN_STEP_SIGNATURE = [
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
tf.TensorSpec(shape=(None, None), dtype=tf.int32),
]
class Seq2SeqTransformerModel(object):
"""Full transformer model."""
def __init__(self,
batch_size=64,
num_heads=8,
ff_dim=512,
num_layers=4,
model_dim=128,
input_symbols=None,
output_symbols=None,
multihead_retrieval_strategy="AVERAGE",
model_dir=".",
name="model"):
self._batch_size = batch_size
self._input_symbols = input_symbols
self._input_vocab_size = len(input_symbols)
self._output_symbols = output_symbols
self._output_vocab_size = len(output_symbols)
self._num_heads = num_heads
self._num_layers = num_layers
self._multihead_retrieval = multihead_retrieval_strategy
self._transformer = _transformer(
input_vocab_size=self._input_vocab_size,
target_vocab_size=self._output_vocab_size,
num_layers=num_layers,
units=ff_dim,
d_model=model_dim,
num_heads=num_heads,
dropout=0.1)
self._learning_rate = CustomSchedule(model_dim)
self._optimizer = tf.keras.optimizers.Adam(
self._learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9)
self._loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction="none")
self._train_accuracy = tf.keras.metrics.Mean(name="train_accuracy")
self._name = name
self._checkpoint_dir = os.path.join(model_dir, self._name)
self._checkpoint_prefix = os.path.join(self._checkpoint_dir, "ckpt")
self._checkpoint = tf.train.Checkpoint(optimizer=self._optimizer,
transformer=self._transformer)
# Length of the current output tensor (for eval).
self._input_length = -1
self._output_length = -1
def _loss_function(self, y_true, y_pred):
loss = self._loss_object(y_true, y_pred)
mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
loss = tf.multiply(loss, mask)
return tf.reduce_mean(loss)
def _accuracy_function(self, real, pred):
accuracies = tf.equal(real, tf.argmax(pred, output_type=tf.int32, axis=2))
mask = tf.math.logical_not(tf.math.equal(real, 0))
accuracies = tf.math.logical_and(mask, accuracies)
accuracies = tf.cast(accuracies, dtype=tf.float32)
mask = tf.cast(mask, dtype=tf.float32)
return tf.reduce_sum(accuracies) / tf.reduce_sum(mask)
@tf.function(input_signature=_TRAIN_STEP_SIGNATURE)
def _train_step(self, inputs, targets):
"""One step of the training."""
target_inputs = targets[:, :-1]
target_real = targets[:, 1:]
with tf.GradientTape() as tape:
predictions, _ = self._transformer(
inputs=[inputs, target_inputs], training=True)
loss = self._loss_function(target_real, predictions)
gradients = tape.gradient(loss, self._transformer.trainable_variables)
self._optimizer.apply_gradients(zip(gradients,
self._transformer.trainable_variables))
self._train_accuracy(self._accuracy_function(target_real, predictions))
return loss
def train(self, corpus, epochs=10, direction="pronounce", window=-1):
"""Runs training."""
# Create training log that also redirects to stdout.
stdout_file = sys.stdout
logfile = os.path.join(self._checkpoint_dir, "train.log")
print("Training log: {}".format(logfile))
sys.stdout = utils.DualLogger(logfile)
# Dump some parameters.
print(" Direction: {}".format(direction))
print(" # Epochs: {}".format(epochs))
print(" Batch size: {}".format(self._batch_size))
print(" Window size: {}".format(window))
print(" Max written len: {}".format(corpus.max_written_len))
print(" Max pron len: {}".format(corpus.max_pronounce_len))
print("Max written word len: {}".format(corpus.max_written_word_len))
print(" Max pron word len: {}".format(corpus.max_pronounce_word_len))
# Perform training.
best_total_loss = 1000000
nbatches = data.num_batches(corpus, self._batch_size, direction=direction,
window=window)
for epoch in range(epochs):
self._train_accuracy.reset_states()
start = time.time()
total_loss = 0
steps = 0
batches = data.batchify(corpus, self._batch_size, direction,
window=window)
batch, (inputs, targ) = next(batches)
while batch > -1:
bos = np.expand_dims(
[self._output_symbols.find("<s>")] * np.shape(targ)[0], 1)
targets = np.concatenate((bos, targ), axis=-1)
batch_loss = self._train_step(inputs, targets)
total_loss += batch_loss
if batch % 10 == 0:
print("Epoch {} Batch {} (/{}) Loss {:.4f}".format(
epoch + 1,
batch,
nbatches,
batch_loss))
steps += 1
batch, (inputs, targ) = next(batches)
total_loss /= steps
print("Epoch {} Loss {:.4f} Accuracy {:.4f}".format(
epoch + 1, total_loss, self._train_accuracy.result()))
if total_loss < best_total_loss:
self._checkpoint.save(file_prefix=self._checkpoint_prefix)
print("Saved checkpoint to {}".format(self._checkpoint_prefix))
best_total_loss = total_loss
print("Time taken for 1 epoch {} sec\n".format(
time.time() - start))
print("Best total loss: {:.4f}".format(best_total_loss))
# Restore stdout.
sys.stdout = stdout_file
def _get_attention(self, attention_weights):
"""Prepare attention for consumption.
Args:
attention_weights: tensor with shape:
(batch=1, num_heads, seq_len_q, seq_len_k).
Returns:
Accumulated attention.
"""
attention_heads = tf.squeeze( # Remove batch dimension.
attention_weights["decoder_layer%d_block2" % self._num_layers], 0)
# Basic sanity checks.
if len(attention_heads) != self._num_heads:
raise ValueError("Invalid number of attention heads: {}".format(
len(attention_heads)))
if len(attention_heads.shape) != 3:
raise ValueError("Invalid shape of attention weights: {}".format(
len(attention_heads.shape)))
if attention_heads.shape[1] > self._output_length:
raise ValueError("Expected output length <= {} for dim 1, got {}".format(
self._output_length, attention_heads.shape[1]))
elif attention_heads.shape[1] < self._output_length:
output_len_diff = self._output_length - attention_heads.shape[1]
attention_heads = tf.pad(attention_heads,
[[0, 0], [0, output_len_diff], [0, 0]])
if attention_heads.shape[2] != self._input_length:
raise ValueError("Expected input length {} for dim 2, got {}".format(
self._input_length, attention_heads.shape[2]))
# Combine.
if self._multihead_retrieval == "AVERAGE":
attention = tf.reduce_sum(attention_heads, axis=0) / self._num_heads
elif self._multihead_retrieval == "MAX":
attention = tf.reduce_max(attention_heads, axis=0)
else:
raise ValueError("Unknown retrieval strategy: {}".format(
self._multihead_retrieval))
return attention
@tf.function(reduce_retracing=True)
def _predict_step(self, encoder_input, output):
"""One prediction step."""
return self._transformer(
inputs=[encoder_input, output], training=False)
def decode(self, inputs, joiner=""):
"""Decodes the inputs."""
encoder_input = tf.convert_to_tensor([inputs], dtype=tf.int32)
# The first input to the transformer will be the start token.
start = [self._output_symbols.find("<s>")]
output = tf.convert_to_tensor(start, dtype=tf.int32)
output = tf.expand_dims(output, 0)
result = []
for _ in range(self._output_length):
# predictions.shape == (batch_size, seq_len, vocab_size)
predictions, attention_weights = self._predict_step(
encoder_input, output)
# select the last word from the seq_len dimension
predictions = predictions[:, -1:, :] # (batch_size, 1, vocab_size)
predicted_id = tf.argmax(predictions, axis=-1, output_type=tf.int32)
# concatentate the predicted_id to the output which is given to the
# decoder as its input.
output = tf.concat([output, predicted_id], axis=-1)
outsym = self._output_symbols.find(int(predicted_id.numpy()))
if outsym == "</s>" or outsym == "</targ>":
break
else:
result.append(outsym)
# Accumulate attention over all the heads.
attention = self._get_attention(attention_weights)
return joiner.join(result), attention.numpy()
def update_property(self, property_name, value):
setattr(self, property_name, value)
@property
def checkpoint(self):
return self._checkpoint
@property
def checkpoint_dir(self):
return self._checkpoint_dir
@property
def input_symbols(self):
return self._input_symbols
@property
def output_symbols(self):
return self._output_symbols
@property
def input_length(self):
return self._input_length
@property
def eval_mode(self):
return "_%s" % self._multihead_retrieval.lower()
| [
"[email protected]"
] | |
99b1f62912fb80c7e719697e2f9075d4fd505216 | 15b12d69ac3123d1562986970ce01d7a47d171de | /typings/nltk/translate/__init__.pyi | 79712704c982cb5c2d56cec50d1fde99fb9fb8ad | [
"Apache-2.0"
] | permissive | simplymanas/python-learning | 9b67b5a7acfb3a7c2455a7d1fc66203a2b419c37 | 75bc99c0dce211fd1bce5f6ce1155e0f4c71d7d0 | refs/heads/master | 2021-07-11T06:40:24.803589 | 2021-06-20T12:06:02 | 2021-06-20T12:06:02 | 241,769,614 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 768 | pyi | """
This type stub file was generated by pyright.
"""
from nltk.translate.api import AlignedSent, Alignment, PhraseTable
from nltk.translate.ibm_model import IBMModel
from nltk.translate.ibm1 import IBMModel1
from nltk.translate.ibm2 import IBMModel2
from nltk.translate.ibm3 import IBMModel3
from nltk.translate.ibm4 import IBMModel4
from nltk.translate.ibm5 import IBMModel5
from nltk.translate.bleu_score import sentence_bleu as bleu
from nltk.translate.ribes_score import sentence_ribes as ribes
from nltk.translate.meteor_score import meteor_score as meteor
from nltk.translate.metrics import alignment_error_rate
from nltk.translate.stack_decoder import StackDecoder
"""
Experimental features for machine translation.
These interfaces are prone to change.
"""
| [
"[email protected]"
] | |
ee75934b54a7c419ea4df630c94ae680bfee4f92 | ba0e07b34def26c37ee22b9dac1714867f001fa5 | /azure-mgmt-powerbiembedded/azure/mgmt/powerbiembedded/models/error_detail.py | 08890398d70c2163092510b29f7f60ffe5e56300 | [
"MIT"
] | permissive | CharaD7/azure-sdk-for-python | b11a08ac7d24a22a808a18203072b4c7bd264dfa | 9fdf0aac0cec8a15a5bb2a0ea27dd331dbfa2f5c | refs/heads/master | 2023-05-12T12:34:26.172873 | 2016-10-26T21:35:20 | 2016-10-26T21:35:20 | 72,448,760 | 1 | 0 | MIT | 2023-05-04T17:15:01 | 2016-10-31T15:14:09 | Python | UTF-8 | Python | false | false | 1,024 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ErrorDetail(Model):
"""ErrorDetail.
:param code:
:type code: str
:param message:
:type message: str
:param target:
:type target: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'target': {'key': 'target', 'type': 'str'},
}
def __init__(self, code=None, message=None, target=None):
self.code = code
self.message = message
self.target = target
| [
"[email protected]"
] | |
c2991b2bf462c17dd248db335305e4195ccdc8e3 | d40ee63566975dd11ae6ba6ea1c2889680c47c90 | /workspace/ros/aerostack_catkin_ws/devel/lib/python2.7/dist-packages/mavros_msgs/srv/_FileRemoveDir.py | 38c5a47514ff4a963c7222853176f534895d0c59 | [] | no_license | la16k/TFG_Laura | 45e9df0f60ef94572260f14346c47969ab2c73b3 | f5e0661aa7ccd200ba056a40beb9e687f5f0d06e | refs/heads/master | 2022-12-27T02:49:05.549777 | 2020-10-05T10:48:57 | 2020-10-05T10:48:57 | 301,374,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,221 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mavros_msgs/FileRemoveDirRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FileRemoveDirRequest(genpy.Message):
_md5sum = "401d5cf5f836aaa9ebdc0897f75da874"
_type = "mavros_msgs/FileRemoveDirRequest"
_has_header = False # flag to mark the presence of a Header object
_full_text = """# FTP::RemoveDir
#
# :success: indicates success end of request
# :r_errno: remote errno if applicable
string dir_path
"""
__slots__ = ['dir_path']
_slot_types = ['string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
dir_path
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FileRemoveDirRequest, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.dir_path is None:
self.dir_path = ''
else:
self.dir_path = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self.dir_path
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.dir_path = str[start:end].decode('utf-8')
else:
self.dir_path = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self.dir_path
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.dir_path = str[start:end].decode('utf-8')
else:
self.dir_path = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from mavros_msgs/FileRemoveDirResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class FileRemoveDirResponse(genpy.Message):
_md5sum = "85394f2e941a8937ac567a617f06157f"
_type = "mavros_msgs/FileRemoveDirResponse"
_has_header = False # flag to mark the presence of a Header object
_full_text = """bool success
int32 r_errno
"""
__slots__ = ['success','r_errno']
_slot_types = ['bool','int32']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
success,r_errno
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(FileRemoveDirResponse, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.success is None:
self.success = False
if self.r_errno is None:
self.r_errno = 0
else:
self.success = False
self.r_errno = 0
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.success, _x.r_errno))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.success, _x.r_errno,) = _get_struct_Bi().unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_Bi().pack(_x.success, _x.r_errno))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 5
(_x.success, _x.r_errno,) = _get_struct_Bi().unpack(str[start:end])
self.success = bool(self.success)
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_Bi = None
def _get_struct_Bi():
global _struct_Bi
if _struct_Bi is None:
_struct_Bi = struct.Struct("<Bi")
return _struct_Bi
class FileRemoveDir(object):
_type = 'mavros_msgs/FileRemoveDir'
_md5sum = 'f140c5ef05b00c3cfc30d5a2061b4d63'
_request_class = FileRemoveDirRequest
_response_class = FileRemoveDirResponse
| [
"[email protected]"
] | |
33161c34e78739d53ded91e468cf82f429dfef1d | b170d37a81c09fd0dbb0edf3cff6296084b32af9 | /cexbot/command_utils.py | 7d0382b5e4f8d343853e41df961287aa984532fe | [
"MIT"
] | permissive | metaperl/cexbot | 8e17a7d5063a82675e002d926324e3c4a6eb6745 | 0dd0b60415afd9c1feb959186d32b1a683887975 | refs/heads/master | 2020-12-29T01:11:50.768031 | 2013-12-10T17:13:18 | 2013-12-10T17:13:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,754 | py | #!/usr/bin/env python
""" cexbot - command_utils.py
Default command line utilities to run cexbot
"""
import os, sys, logging
import cexbot, config, parser, db, cexapi, updater, timer, cex
def main(argv=[]):
args = parser.get_parser()
verbose = 1
if args.verbose:
verbose = 2
if args.debug:
verbose = 3
if verbose>2:
log_level=logging.DEBUG
elif verbose==2:
log_level=logging.INFO
elif verbose==1:
log_level=logging.WARNING
elif verbose<1:
log_level=logging.ERROR
logging.basicConfig(level=log_level, format="%(asctime)s %(levelname)s: %(message)s")
if args.command == 'version':
print cexbot.get_version()
return True
# make sure this is always above command parsing
# print config
config.first_run()
if verbose == 3:
print args
if args.command == 'config':
if args.list:
return config.list()
elif args.edit:
return config.edit_config()
elif args.testauth:
return config.test_auth()
elif args.name and args.value:
v = config.set(args.name, args.value)
return config.cprint(args.name)
elif args.name:
return config.cprint(args.name)
logging.error('Invalid config option')
return 1
elif args.command == 'update':
return updater.check_update()
# not implemented
elif args.command == 'cleardata':
return config.clear_userdata()
ac = cexapi.CexAPI(config.get('cex.username'), config.get('cex.apikey'), config.get('cex.secret'))
dbi = db.DbManager()
  cx = cex.CexMethods(ac, dbi)  # assuming CexMethods lives in the cex module imported above
if args.command == 'balance':
print "Balance: %s BTC" % ac.get_balance()
return True
elif args.command == 'initdb':
return dbi.initdb()
elif args.command == 'getmarket':
return ac.get_market()
elif args.command == 'getprice':
return ac.get_market_quote()
elif args.command == 'order':
amount = args.amount
price = args.price
r = ac.place_order(amount, price)
logging.info("Ordered: %s" % r)
elif args.command == 'updatequotes':
logging.info('Running updatequotes')
ticker_timer = timer.ReqTimer(2, cx.update_ticker)
ticker_timer.start()
elif args.command == 'buybalance':
logging.info('Running buybalance')
balance_timer = timer.ReqTimer(5, ac.buy_balance)
balance_timer.start()
# @TODO __import__
# if args.task in cexbot.tasks:
# cexbot.tasks[args.task]()
def cl_error(msg=""):
print >> sys.stderr, msg
def run_cl(argv=[]):
try:
raise SystemExit(main(sys.argv))
except KeyboardInterrupt:
cl_error('Interrupted.')
raise SystemExit(-1)
def run_gui(argv=[]):
print "GUI coming soon."
# return None
try:
import cexbot.gui
cexbot.gui.main()
except Exception, e:
print "Error: %s" % str(e)
| [
"[email protected]"
] | |
20faeb3af99098aeae7f42e863b981e32e75deb0 | f8a053f287c66652adffd15624c85dcc0850d898 | /setup.py | 424d2c9837ce0ca5390c3445ddf06d2283a94b46 | [
"MIT"
] | permissive | heyongwei/zvt | cce9e9bac78c6acc5e73b517f80d1fa464342817 | 051106955a6a01707847ee56a447e2502a25ff46 | refs/heads/master | 2023-04-23T16:36:58.631045 | 2021-05-16T16:01:18 | 2021-05-16T16:01:18 | 363,716,402 | 0 | 0 | MIT | 2021-05-16T16:01:19 | 2021-05-02T17:59:26 | Python | UTF-8 | Python | false | false | 2,508 | py | #!/usr/bin/env python
# To use a consistent encoding
from codecs import open
from os import path
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
try:
# for pip >= 10
from pip._internal.req import parse_requirements
except ImportError:
# for pip <= 9.0.3
from pip.req import parse_requirements
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
install_reqs = parse_requirements("requirements.txt", session=False)
try:
requirements = [str(ir.req) for ir in install_reqs]
except:
requirements = [str(ir.requirement) for ir in install_reqs]
setup(
name='zvt',
version='0.9.3',
description='unified,modular quant framework for human beings ',
long_description=long_description,
url='https://github.com/zvtvz/zvt',
author='foolcage',
author_email='[email protected]',
classifiers=[ # Optional
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Customer Service',
'Intended Audience :: Education',
'Intended Audience :: Financial and Insurance Industry',
'Topic :: Software Development :: Build Tools',
'Topic :: Office/Business :: Financial :: Investment',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
keywords='quant stock finance fintech big-data zvt technical-analysis trading-platform pandas fundamental-analysis',
packages=find_packages(include=['zvt.*', 'zvt']),
python_requires='>=3.5, <4',
include_package_data=True,
install_requires=requirements,
project_urls={ # Optional
'Bug Reports': 'https://github.com/zvtvz/zvt/issues',
'Funding': 'https://www.foolcage.com/zvt',
'Say Thanks!': 'https://saythanks.io/to/foolcage',
'Source': 'https://github.com/zvtvz/zvt',
},
long_description_content_type="text/markdown",
entry_points={
'console_scripts': [
'zvt = zvt.main:main',
'zvt_plugin = zvt.plugin:main',
'zvt_export = zvt.plugin:export',
],
},
)
| [
"[email protected]"
] | |
0a261a997e8b133dd2f20809de2b05a9df10aa1a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03041/s690420831.py | d69751f59907935676518728b9785bda095c49de | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | n, k = map(int, input().split())
s = input()
# The original branches identically on 'A', 'B' and 'C' (the only characters
# the input contains), so a single lower() call is equivalent:
print(s[:k-1] + s[k-1].lower() + s[k:])
"[email protected]"
] | |
feb5b5b9942b836a874b3a07264b9012e4b7df0b | 3f9bec3201cc255c5ad6023cc746488306224015 | /Chapter 13/Example_13-2.py | 08ddcf523baaba0c0e1dc8735da55bee0e9ae257 | [] | no_license | afettouhi/FluentPython-py37 | 64927a908c5804d8970ea3f4b667c109c5867a6a | a14a721d738b8908f9e8e78552d70fbb2d6dd74f | refs/heads/master | 2022-06-14T18:26:47.456090 | 2020-05-08T04:13:51 | 2020-05-08T04:13:51 | 259,222,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 199 | py |
import decimal
# Unary + applies the current arithmetic context: +x re-rounds x to ctx.prec.
# (The book shows this as a console session; printed here so it runs as a script.)
ctx = decimal.getcontext()
ctx.prec = 40
one_third = decimal.Decimal('1') / decimal.Decimal('3')
print(one_third)                # 40 significant digits
print(one_third == +one_third)  # True: same precision, value unchanged
ctx.prec = 28
print(one_third == +one_third)  # False: +one_third is re-rounded to 28 digits
print(+one_third)               # 28 significant digits
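# A minimal variant (added here, not from the book): decimal.localcontext()
# scopes the precision change so the module-wide context is left untouched.
with decimal.localcontext() as local_ctx:
    local_ctx.prec = 40
    wide = decimal.Decimal('1') / decimal.Decimal('3')
print(+wide)  # re-rounded to the surrounding context's precision (28 here)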
ea566c781d0e6f7ed3612211e0138868a141630c | 780af071416ece1e1f6ead426e95155c3de209e9 | /notebooks/rv/__init__.py | 5918d5994e584d77b4e31b849f510e080fa8203b | [] | no_license | o-smirnov/docker-notebook | b2afd38cf16a1db9d3049c4ce79f7bc61c6183fb | 9cdb1f3fbaaca8edb94d9706a1e62410942a2f1a | refs/heads/master | 2021-01-22T17:14:03.346539 | 2015-05-07T12:31:01 | 2015-05-07T12:31:01 | 35,032,895 | 0 | 0 | null | 2015-05-04T12:15:27 | 2015-05-04T12:15:27 | null | UTF-8 | Python | false | false | 7,750 | py |
import os, time, math, astropy, pyfits, traceback, fnmatch
from pandas import DataFrame, Series
import IPython.display
from IPython.display import Image, HTML, display
from rv.FITSFile import FITSFile
from rv.ImageFile import ImageFile
import matplotlib.pyplot as plt
NOTEBOOK_DIR = os.environ.get('RVNB_NOTEBOOK_DIR', '/notebooks')
RESULTDIR = os.environ.get('RVNB_DATA_DIR', '/notebooks/data')
ORIGINAL_RESULTDIR = os.environ.get('RVNB_ORIGINAL_DIR', '/notebooks/data')
WIDTH = None # globally fix a plot width (inches)
MINCOL = 2 # default min # of columns to display in thumbnail view
MAXCOL = 4 # default max # of columns to display in thumbnail view
MAXWIDTH = 16 # default width of thumbnail view (inches)
DPI = 80 # screen DPI
TIMEFORMAT = "%H:%M:%S %b %d"
astropy.log.setLevel('ERROR')
from rv.File import DataFile
from rv.Render import renderTitle,renderTable
class FileList(list):
    _sort_attributes = dict(x="ext", n="basename", s="size", t="mtime")

    def __init__(self, files=[], extcol=True, thumbs=None, title="", sort="xnt"):
        list.__init__(self, files)
        self._extcol = extcol
        self._thumbs = thumbs
        self._title = title
        if sort:
            self.sort(sort)

    def sort(self, opt="xnt"):
        """Sort the filelist by name, eXtension, Time, Size, optionally Reverse"""
        opt = opt.lower()
        # build up order of comparison
        cmpattr = []
        for attr in opt:
            if attr in self._sort_attributes:
                cmpattr.append(self._sort_attributes[attr])

        def compare(a, b, attrs=cmpattr):
            for attr in attrs:
                result = cmp(getattr(a, attr), getattr(b, attr))
                if result:
                    return result
            return 0

        list.sort(self, cmp=compare, reverse='r' in opt)
        self._init_df()
        return self
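    # Usage sketch (added; hypothetical session built on this class):
    #   fl = FileList(datafiles, title="demo")
    #   fl.sort("tr")   # time order, reversed: newest first
    #   fl.sort("sn")   # by size, then by name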
    def _init_df(self):
        if self._extcol:
            df_files = [(f.basename, f.ext, f.size, f.mtime_str) for f in self]
            self._df = DataFrame(df_files,
                                 columns=('name', 'ext', 'size',
                                          'modified')) if df_files else None
        else:
            df_files = [(f.name, f.size, f.mtime_str) for f in self]
            self._df = DataFrame(
                df_files,
                columns=('name', 'size', 'modified')) if df_files else None

    def _repr_html_(self, ncol=1):
        html = renderTitle(self._title)
        if self._extcol:
            labels = "name", "ext", "size", "modified"
            data = [(df.basename, df.ext, df.size_str, df.mtime_str) for df in self]
            links = [(df.fullpath, df.fullpath, None, None) for df in self]
        else:
            labels = "name", "size", "modified"
            data = [(df.basename, df.size_str, df.mtime_str) for df in self]
            links = [(df.fullpath, None, None) for df in self]
        html += renderTable(data, labels, links=links, ncol=ncol)
        return html

    def show(self, ncol=1):
        return IPython.display.display(HTML(self._repr_html_(ncol=ncol)))

    def show_all(self):
        for f in self:
            f.show()

    def __call__(self, pattern):
        files = [f for f in self if fnmatch.fnmatch(f.name, pattern)]
        return FileList(files,
                        extcol=self._extcol,
                        thumbs=self._thumbs,
                        title=os.path.join(self._title, pattern))

    def thumbs(self, **kw):
        kw['title'] = self._title
        return self._thumbs(self, **kw) if self._thumbs else None

    def __getslice__(self, *slc):
        return FileList(list.__getslice__(self, *slc),
                        extcol=self._extcol,
                        thumbs=self._thumbs,
                        title="%s[%s]" % (self._title, ":".join(map(str, slc))))
class DataDir(object):
    """This class represents a directory in the data folder"""

    def __init__(self, name, files=[], root=""):
        self.fullpath = name
        if root and name.startswith(root):
            name = name[len(root):]
        if name.startswith("/"):
            name = name[1:]
        name = name or "."
        self.name = self.path = name
        self.mtime = os.path.getmtime(self.fullpath)
        files = [f for f in files if not f.startswith('.')]
        # our title, in HTML
        self._title = os.path.join(ORIGINAL_RESULTDIR, self.path
                                   if self.path != "." else "")
        # make list of DataFiles and sort by time
        self.files = FileList([DataFile(os.path.join(self.fullpath, f),
                                        root=root) for f in files],
                              title=self._title)
        # make separate lists of fits files and image files
        self.fits = FileList([f for f in self.files
                              if type(f) is FITSFile],
                             extcol=False,
                             thumbs=FITSFile._show_thumbs,
                             title="FITS files, " + self._title)
        self.images = FileList([f for f in self.files
                                if type(f) is ImageFile],
                               extcol=False,
                               thumbs=ImageFile._show_thumbs,
                               title="Images, " + self._title)

    def sort(self, opt):
        for f in self.files, self.fits, self.images:
            f.sort(opt)
        return self

    def show(self):
        return IPython.display.display(self)

    def _repr_html_(self):
        return renderTitle(self._title) + self.files._repr_html_()
class DirList(list):
    def __init__(self, rootfolder=None, pattern="*", scan=True, title=None):
        self._root = rootfolder = rootfolder or RESULTDIR
        self._title = title or ORIGINAL_RESULTDIR
        if scan:
            for dir_, _, files in os.walk(rootfolder):
                basename = os.path.basename(dir_)
                if fnmatch.fnmatch(basename, pattern) and not basename.startswith("."):
                    self.append(DataDir(dir_, files, root=rootfolder))
        self._sort()

    def _sort(self):
        self.sort(cmp=lambda x, y: cmp(x.name, y.name))

    def _repr_html_(self):
        html = renderTitle(self._title)
        dirlist = []
        for dir_ in self:
            nfits = len(dir_.fits)
            nimg = len(dir_.images)
            nother = len(dir_.files) - nfits - nimg
            dirlist.append(
                (dir_.name, nfits, nimg, nother, time.strftime(TIMEFORMAT, time.localtime(dir_.mtime))))
        html += renderTable(dirlist,
                            labels=("name", "# FITS", "# img", "# others", "modified"))
        return html

    def show(self):
        return IPython.display.display(self)

    def __call__(self, pattern):
        return DirList(self._root, pattern,
                       title=os.path.join(self._title, pattern))

    def __getslice__(self, *slc):
        newlist = DirList(self._root, scan=False,
                          title="%s[%s]" % (self._title, ":".join(map(str, slc))))
        newlist += list.__getslice__(self, *slc)
        newlist._sort()
        return newlist
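# Usage sketch (added; hypothetical names, assumes the notebook data layout above):
#   dirs = DirList()              # recursively scan RESULTDIR
#   dirs("*plots*").show()        # only directories matching a glob pattern
#   d = dirs[0]                   # a DataDir
#   d.fits.thumbs()               # thumbnail view of its FITS files
#   d.files.sort("tr").show()     # newest files first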
# def scandirs (datafolder=DATAFOLDER):
# """Scans all directories under datafolder and populates the DIRS list"""
# global DIRS;
# DIRS = DirList(datafolder);
# for name,ds in sorted(all_dirs):
# print "Contents of",name
# display(d)
99cbf86713b07499e57c02d95ba061f54909e2b4 | 0aa150f1bfe3fdbdeaaeeaef5754c3e90378e935 | /yearapp/migrations/0034_auto_20191008_0609.py | 75095d6fe84241d240057f54d63809fb82a11f8f | [] | no_license | arshpreetsingh12/yearbook | 6232eba52330b36a7404317985aea4482befd101 | dac303e3cc448985256b44baae6e9baa4c8d8292 | refs/heads/master | 2020-08-07T19:57:00.281613 | 2019-10-11T13:41:49 | 2019-10-11T13:41:49 | 213,571,523 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py |
# Generated by Django 2.2.5 on 2019-10-08 06:09
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('yearapp', '0033_sale'),
    ]

    operations = [
        migrations.AlterField(
            model_name='invitation',
            name='address',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='invitation',
            name='name_of_venue',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='sale',
            name='description',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
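# Usage note (added; standard Django workflow, not part of the generated file):
#   python manage.py migrate yearapp            # applies this migration
#   python manage.py sqlmigrate yearapp 0034    # prints the SQL it will run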
862491768d6eba456ebf0e1ea79d633839949c26 | 56f5b2ea36a2258b8ca21e2a3af9a5c7a9df3c6e | /CMGTools/H2TauTau/prod/TauES_test/nom/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374658142/HTT_24Jul_newTES_manzoni_Nom_Jobs/Job_149/run_cfg.py | 36401bb8144102988ca277182f23311dd0e887ef | [] | no_license | rmanzoni/HTT | 18e6b583f04c0a6ca10142d9da3dd4c850cddabc | a03b227073b2d4d8a2abe95367c014694588bf98 | refs/heads/master | 2016-09-06T05:55:52.602604 | 2014-02-20T16:35:34 | 2014-02-20T16:35:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,049 | py |
import FWCore.ParameterSet.Config as cms
import os,sys
sys.path.append('/afs/cern.ch/user/m/manzoni/summer13/CMGTools/CMSSW_5_3_9/src/CMGTools/H2TauTau/prod/TauES_test/nom/emb/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0_1374658142/HTT_24Jul_newTES_manzoni_Nom_Jobs')
from base_cfg import *
process.source = cms.Source("PoolSource",
    noEventSort = cms.untracked.bool(True),
    inputCommands = cms.untracked.vstring('keep *',
        'drop cmgStructuredPFJets_cmgStructuredPFJetSel__PAT'),
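    # Each string below is a luminosity-section range in the standard CMSSW
    # "run:firstLumi-run:lastLumi" form; only the listed lumi blocks are
    # processed (the usual way a certified golden-JSON selection is applied).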
lumisToProcess = cms.untracked.VLuminosityBlockRange( ("190645:10-190645:110", "190646:1-190646:111", "190659:33-190659:167", "190679:1-190679:55", "190688:69-190688:249",
"190702:51-190702:53", "190702:55-190702:122", "190702:124-190702:169", "190703:1-190703:252", "190704:1-190704:3",
"190705:1-190705:5", "190705:7-190705:65", "190705:81-190705:336", "190705:338-190705:350", "190705:353-190705:383",
"190706:1-190706:126", "190707:1-190707:237", "190707:239-190707:257", "190708:1-190708:189", "190733:71-190733:96",
"190733:99-190733:389", "190733:392-190733:460", "190736:1-190736:80", "190736:83-190736:185", "190738:1-190738:130",
"190738:133-190738:226", "190738:229-190738:349", "190782:55-190782:181", "190782:184-190782:233", "190782:236-190782:399",
"190782:401-190782:409", "190895:64-190895:202", "190895:210-190895:302", "190895:305-190895:584", "190895:587-190895:948",
"190906:73-190906:256", "190906:259-190906:354", "190906:356-190906:496", "190945:124-190945:207", "190949:1-190949:81",
"191043:45-191043:46", "191046:1-191046:21", "191046:24-191046:82", "191046:84-191046:88", "191046:92-191046:116",
"191046:119-191046:180", "191046:183", "191046:185-191046:239", "191056:1", "191056:4-191056:9",
"191056:16-191056:17", "191056:19", "191057:1", "191057:4-191057:40", "191062:1",
"191062:3", "191062:5-191062:214", "191062:216-191062:541", "191090:1-191090:55", "191201:38-191201:49",
"191201:52-191201:79", "191202:1-191202:64", "191202:66-191202:68", "191202:87-191202:105", "191202:108-191202:118",
"191226:77-191226:78", "191226:81-191226:831", "191226:833-191226:1454", "191226:1456-191226:1466", "191226:1469-191226:1507",
"191226:1510-191226:1686", "191247:1-191247:153", "191247:156-191247:280", "191247:283-191247:606", "191247:608-191247:620",
"191247:622-191247:818", "191247:821-191247:834", "191247:837-191247:1031", "191247:1034-191247:1046", "191247:1049-191247:1140",
"191247:1143-191247:1187", "191247:1190-191247:1214", "191247:1217-191247:1224", "191248:1-191248:103", "191264:59-191264:79",
"191264:82-191264:152", "191264:155-191264:189", "191271:56-191271:223", "191271:225-191271:363", "191276:1-191276:16",
"191277:1-191277:28", "191277:30-191277:164", "191277:167-191277:253", "191277:255-191277:457", "191277:460-191277:535",
"191277:537-191277:576", "191277:579-191277:775", "191277:778-191277:811", "191277:813-191277:849", "191367:1-191367:2",
"191411:1-191411:23", "191695:1", "191718:43-191718:95", "191718:98-191718:207", "191720:1",
"191720:3-191720:15", "191720:17-191720:181", "191721:1", "191721:3-191721:34", "191721:36-191721:183",
"191721:186-191721:189", "191726:1-191726:13", "191810:15", "191810:22-191810:49", "191810:52-191810:92",
"191830:54-191830:242", "191830:245-191830:301", "191830:304-191830:393", "191833:1", "191833:3-191833:103",
"191834:1-191834:30", "191834:33-191834:74", "191834:77-191834:299", "191834:302-191834:352", "191837:1-191837:44",
"191837:47-191837:53", "191837:56-191837:65", "191856:1-191856:133", "191859:1-191859:28", "191859:31-191859:126",
"193093:1-193093:33", "193123:1-193123:27", "193124:1-193124:52", "193192:58-193192:86", "193193:1-193193:6",
"193193:8", "193193:11-193193:83", "193193:86-193193:120", "193193:122-193193:160", "193193:162-193193:274",
"193193:276-193193:495", "193193:497-193193:506", "193207:54-193207:182", "193334:29-193334:172", "193336:1-193336:264",
"193336:267-193336:492", "193336:495-193336:684", "193336:687-193336:729", "193336:732-193336:951", "193541:77-193541:101",
"193541:103-193541:413", "193541:416-193541:575", "193541:578-193541:619", "193556:41-193556:83", "193557:1-193557:84",
"193575:48-193575:173", "193575:176-193575:349", "193575:351-193575:394", "193575:397-193575:415", "193575:417-193575:658",
"193575:660-193575:752", "193621:60-193621:570", "193621:573-193621:769", "193621:772-193621:976", "193621:979-193621:1053",
"193621:1056-193621:1137", "193621:1139-193621:1193", "193621:1195-193621:1371", "193621:1373-193621:1654", "193834:1-193834:35",
"193835:1-193835:20", "193835:22-193835:26", "193836:1-193836:2", "193998:66-193998:113", "193998:115-193998:278",
"193999:1-193999:45", "194027:57-194027:113", "194050:53-194050:113", "194050:116-194050:273", "194050:275-194050:355",
"194050:357-194050:369", "194050:372-194050:391", "194050:394-194050:490", "194050:492-194050:814", "194050:816-194050:1435",
"194050:1437-194050:1735", "194050:1760-194050:1888", "194051:1-194051:12", "194052:1-194052:99", "194052:102-194052:166",
"194075:48-194075:101", "194075:103", "194075:105-194075:107", "194075:109", "194075:111",
"194076:1-194076:9", "194076:11-194076:55", "194076:58-194076:163", "194076:165-194076:228", "194076:230-194076:264",
"194076:267-194076:507", "194076:509-194076:527", "194076:530-194076:538", "194076:541-194076:562", "194076:565-194076:748",
"194108:81-194108:161", "194108:164-194108:264", "194108:266-194108:373", "194108:376-194108:396", "194108:398-194108:433",
"194108:436-194108:452", "194108:454-194108:577", "194108:579-194108:590", "194108:593-194108:668", "194108:671-194108:872",
"194115:66-194115:184", "194115:186-194115:338", "194115:340-194115:346", "194115:348-194115:493", "194115:496-194115:731",
"194115:819-194115:857", "194117:1-194117:38", "194119:1-194119:229", "194119:232-194119:261", "194120:1-194120:162",
"194120:165-194120:406", "194150:42-194150:127", "194150:129-194150:261", "194150:264-194150:311", "194151:47-194151:72",
"194151:75-194151:191", "194151:193-194151:238", "194151:240-194151:617", "194151:619", "194151:621",
"194151:623", "194153:1-194153:115", "194199:96-194199:227", "194199:229-194199:336", "194199:339-194199:402",
"194210:3-194210:195", "194210:198-194210:217", "194210:220-194210:359", "194210:361-194210:555", "194223:61-194223:112",
"194224:1-194224:126", "194224:129-194224:206", "194224:208-194224:250", "194224:253-194224:309", "194224:312-194224:386",
"194224:389-194224:412", "194225:1-194225:23", "194225:26-194225:47", "194225:49-194225:85", "194225:88-194225:149",
"194270:56-194270:68", "194303:56-194303:66", "194303:69-194303:102", "194304:1-194304:43", "194304:46",
"194305:1-194305:84", "194314:52-194314:130", "194314:133-194314:300", "194315:1-194315:10", "194315:13-194315:314",
"194315:317-194315:428", "194315:431-194315:452", "194315:455-194315:467", "194317:1-194317:20", "194424:63-194424:141",
"194424:144-194424:195", "194424:198-194424:266", "194424:268-194424:421", "194424:424-194424:478", "194424:481-194424:531",
"194424:534-194424:553", "194424:556-194424:706", "194424:708", "194428:1-194428:85", "194428:87-194428:122",
"194428:125-194428:294", "194428:296-194428:465", "194429:1-194429:4", "194429:7-194429:54", "194429:57-194429:147",
"194429:150-194429:411", "194429:413-194429:742", "194429:745-194429:986", "194429:988-194429:1019", "194439:46-194439:77",
"194439:79-194439:106", "194455:45-194455:64", "194455:67-194455:140", "194455:142-194455:255", "194455:293-194455:303",
"194464:1-194464:127", "194464:130-194464:142", "194464:145-194464:210", "194479:1-194479:44", "194479:165-194479:232",
"194479:235-194479:262", "194479:265-194479:374", "194479:377-194479:431", "194479:434-194479:489", "194479:492-194479:529",
"194479:531-194479:566", "194480:1-194480:32", "194480:34-194480:205", "194480:207-194480:375", "194480:377-194480:387",
"194480:389-194480:759", "194480:762-194480:956", "194480:959-194480:1402", "194533:46-194533:379", "194533:382-194533:415",
"194533:417-194533:618", "194533:620-194533:872", "194619:31-194619:110", "194631:1-194631:42", "194631:44-194631:100",
"194631:102-194631:169", "194631:171-194631:222", "194643:1-194643:287", "194644:1-194644:168", "194644:171-194644:181",
"194644:184-194644:185", "194644:187-194644:319", "194644:321-194644:421", "194691:61-194691:104", "194691:107-194691:155",
"194691:158-194691:251", "194691:254-194691:268", "194691:271-194691:272", "194691:275-194691:289", "194691:292-194691:313",
"194699:1-194699:30", "194699:32-194699:52", "194699:55-194699:64", "194699:67-194699:71", "194699:73-194699:154",
"194699:157-194699:215", "194699:218-194699:238", "194699:241-194699:259", "194702:1-194702:138", "194702:141-194702:191",
"194704:1-194704:41", "194704:44-194704:545", "194704:548-194704:592", "194711:1-194711:7", "194711:9-194711:619",
"194712:1-194712:56", "194712:61-194712:418", "194712:420-194712:625", "194712:627-194712:759", "194735:44-194735:71",
"194735:74-194735:101", "194735:104-194735:130", "194778:60-194778:118", "194778:120-194778:219", "194789:1-194789:18",
"194789:21-194789:32", "194789:34-194789:80", "194789:82-194789:166", "194789:168-194789:269", "194789:272-194789:405",
"194789:409-194789:414", "194789:417-194789:427", "194789:430-194789:566", "194790:1-194790:45", "194825:72-194825:117",
"194825:120-194825:221", "194896:34-194896:55", "194896:58-194896:79", "194896:82-194896:103", "194897:1-194897:6",
"194897:8-194897:78", "194897:80-194897:96", "194897:98-194897:102", "194912:53-194912:70", "194912:72-194912:96",
"194912:98-194912:444", "194912:446-194912:450", "194912:453-194912:467", "194912:470-194912:561", "194912:564-194912:660",
"194912:663-194912:813", "194912:815-194912:840", "194912:843-194912:864", "194912:866-194912:1004", "194912:1007-194912:1025",
"194912:1027-194912:1067", "194912:1069-194912:1137", "194912:1140-194912:1166", "194912:1168-194912:1249", "194912:1251-194912:1304",
"194912:1307-194912:1444", "194912:1447-194912:1487", "194912:1489-194912:1503", "194912:1506-194912:1662", "194914:1-194914:38",
"194915:1-194915:74", "195013:94-195013:144", "195013:146-195013:185", "195013:187-195013:206", "195013:208-195013:299",
"195013:302-195013:324", "195013:326-195013:366", "195013:369-195013:447", "195013:450-195013:526", "195013:528-195013:541",
"195014:1-195014:6", "195014:9-195014:119", "195014:121-195014:148", "195015:1-195015:13", "195016:1-195016:21",
"195016:23-195016:55", "195016:58-195016:63", "195016:65-195016:174", "195016:177-195016:184", "195016:186-195016:241",
"195016:243-195016:246", "195016:248-195016:251", "195016:254-195016:367", "195016:370-195016:422", "195016:425-195016:560",
"195016:563-195016:569", "195099:70-195099:144", "195099:147-195099:186", "195099:189-195099:208", "195099:211-195099:224",
"195099:227-195099:248", "195109:98-195109:241", "195112:1-195112:12", "195112:15-195112:26", "195113:1-195113:209",
"195113:212-195113:388", "195113:391-195113:403", "195113:406-195113:419", "195113:422-195113:492", "195113:495-195113:579",
"195114:1-195114:69", "195114:72-195114:103", "195115:1-195115:7", "195115:10-195115:22", "195147:132-195147:282",
"195147:285-195147:294", "195147:297-195147:331", "195147:334-195147:363", "195147:366-195147:442", "195147:445-195147:536",
"195147:539-195147:559", "195163:72-195163:138", "195163:140-195163:224", "195163:227-195163:240", "195163:243",
"195163:246-195163:347", "195164:1-195164:64", "195165:1-195165:4", "195165:7-195165:41", "195165:44-195165:54",
"195165:56-195165:153", "195165:156-195165:260", "195165:263-195165:266", "195251:1-195251:131", "195251:134-195251:137",
"195251:140-195251:152", "195251:154-195251:165", "195251:167-195251:242", "195303:109-195303:191", "195303:194-195303:277",
"195303:280-195303:310", "195303:312-195303:316", "195303:318-195303:409", "195304:1-195304:3", "195304:6-195304:22",
"195304:27-195304:80", "195304:83-195304:100", "195304:103-195304:154", "195304:157-195304:341", "195304:344-195304:588",
"195304:590-195304:727", "195304:729-195304:1003", "195304:1006-195304:1079", "195304:1083-195304:1140", "195304:1143-195304:1229",
"195378:90-195378:117", "195378:120-195378:127", "195378:130-195378:185", "195378:187-195378:204", "195378:206-195378:302",
"195378:305-195378:542", "195378:544-195378:565", "195378:567-195378:645", "195378:647-195378:701", "195378:703-195378:734",
"195378:737-195378:1120", "195378:1122-195378:1133", "195390:1", "195390:4-195390:27", "195390:30-195390:145",
"195390:147-195390:183", "195390:186-195390:187", "195390:190-195390:208", "195390:210-195390:213", "195390:215-195390:400",
"195396:49-195396:55", "195396:58-195396:63", "195396:66-195396:131", "195397:1-195397:10", "195397:12-195397:89",
"195397:92-195397:120", "195397:123-195397:141", "195397:143-195397:251", "195397:253", "195397:256-195397:475",
"195397:478-195397:525", "195397:527-195397:608", "195397:611-195397:776", "195397:779-195397:970", "195397:972-195397:1121",
"195397:1123-195397:1181", "195397:1184-195397:1198", "195397:1200-195397:1209", "195398:3-195398:137", "195398:139-195398:494",
"195398:497-195398:585", "195398:587-195398:817", "195398:820-195398:824", "195398:827-195398:1225", "195398:1228-195398:1307",
"195398:1309-195398:1712", "195398:1721-195398:1736", "195398:1741-195398:1752", "195398:1767-195398:1795", "195399:1-195399:192",
"195399:194-195399:382", "195530:1-195530:80", "195530:82-195530:104", "195530:107-195530:156", "195530:159-195530:300",
"195530:302-195530:405", "195540:68-195540:123", "195540:126-195540:137", "195540:140-195540:283", "195540:286-195540:319",
"195551:91-195551:106", "195552:1-195552:21", "195552:23-195552:27", "195552:30-195552:147", "195552:149-195552:155",
"195552:158-195552:182", "195552:185-195552:287", "195552:290-195552:349", "195552:352-195552:469", "195552:472-195552:815",
"195552:818-195552:823", "195552:825-195552:883", "195552:885-195552:1152", "195552:1154-195552:1300", "195552:1303-195552:1789",
"195633:40-195633:42", "195647:1-195647:41", "195649:1-195649:69", "195649:72-195649:151", "195649:154-195649:181",
"195649:183-195649:247", "195655:1-195655:129", "195655:131-195655:184", "195655:186-195655:260", "195655:263-195655:350",
"195655:353-195655:446", "195655:448-195655:483", "195655:485-195655:498", "195656:1-195656:362", "195658:1-195658:37",
"195658:40-195658:362", "195658:364-195658:382", "195658:384-195658:386", "195749:1-195749:8", "195749:10-195749:33",
"195749:36-195749:131", "195757:1-195757:82", "195757:85-195757:115", "195757:118-195757:161", "195757:163-195757:206",
"195758:1-195758:18", "195774:1-195774:13", "195774:16-195774:137", "195774:139-195774:151", "195774:154-195774:162",
"195774:164-195774:256", "195774:258-195774:276", "195774:279-195774:362", "195774:365-195774:466", "195774:469-195774:618",
"195774:620-195774:649", "195774:651-195774:830", "195775:1-195775:57", "195775:60-195775:100", "195775:103-195775:170",
"195776:1-195776:63", "195776:66-195776:283", "195776:286-195776:337", "195776:340-195776:399", "195776:401-195776:409",
"195776:411-195776:477", "195841:74-195841:85", "195868:1-195868:88", "195868:90-195868:107", "195868:110-195868:205",
"195915:1-195915:109", "195915:111-195915:275", "195915:278-195915:390", "195915:393-195915:417", "195915:419-195915:429",
"195915:432-195915:505", "195915:507-195915:747", "195915:749-195915:785", "195915:787-195915:828", "195915:830-195915:850",
"195916:1-195916:16", "195916:19-195916:68", "195916:71-195916:212", "195917:1-195917:4", "195918:1-195918:44",
"195918:46", "195918:49-195918:64", "195919:1-195919:15", "195923:1-195923:14", "195925:1-195925:12",
"195926:1", "195926:3-195926:19", "195926:21-195926:34", "195929:1-195929:29", "195930:1-195930:77",
"195930:80-195930:176", "195930:179-195930:526", "195930:529-195930:596", "195937:1-195937:28", "195937:31-195937:186",
"195937:188-195937:396", "195947:23-195947:62", "195947:64-195947:88", "195948:51-195948:116", "195948:119-195948:144",
"195948:147", "195948:150-195948:352", "195948:355-195948:369", "195948:372-195948:402", "195948:404-195948:500",
"195948:503-195948:540", "195948:543-195948:565", "195948:567-195948:602", "195948:605-195948:615", "195950:1-195950:71",
"195950:73-195950:138", "195950:141-195950:169", "195950:172-195950:332", "195950:335-195950:350", "195950:353-195950:382",
"195950:385-195950:421", "195950:424-195950:450", "195950:453-195950:483", "195950:485-195950:616", "195950:619-195950:715",
"195950:718-195950:787", "195950:789-195950:800", "195950:803-195950:829", "195950:831", "195950:833-195950:1587",
"195963:54-195963:58", "195970:44-195970:49", "195970:51-195970:85", "196019:54-196019:68", "196027:1-196027:55",
"196027:58-196027:119", "196027:121-196027:155", "196027:158-196027:186", "196046:12-196046:40", "196047:1-196047:64",
"196047:70-196047:75", "196048:1-196048:44", "196048:46-196048:48", "196197:58-196197:122", "196197:125-196197:179",
"196197:181-196197:311", "196197:313-196197:516", "196197:519-196197:562", "196199:1-196199:33", "196199:36-196199:83",
"196199:86-196199:118", "196199:121-196199:147", "196199:150-196199:237", "196199:239-196199:285", "196199:287-196199:534",
"196200:1-196200:68", "196202:3-196202:61", "196202:64-196202:108", "196203:1-196203:102", "196203:107-196203:117",
"196218:55-196218:199", "196218:201-196218:224", "196218:226-196218:393", "196218:396-196218:494", "196218:496-196218:741",
"196218:744-196218:752", "196218:754-196218:757", "196218:759-196218:820", "196239:1-196239:59", "196239:62-196239:154",
"196239:157-196239:272", "196239:274-196239:373", "196239:375-196239:432", "196239:435-196239:465", "196239:468-196239:647",
"196239:650-196239:706", "196239:709-196239:1025", "196249:63-196249:77", "196249:80-196249:99", "196250:1-196250:2",
"196250:5-196250:265", "196250:267-196250:426", "196252:1-196252:35", "196334:59-196334:111", "196334:113-196334:123",
"196334:126-196334:132", "196334:135-196334:167", "196334:170-196334:193", "196334:196-196334:257", "196334:259-196334:267",
"196334:270-196334:289", "196334:292-196334:342", "196349:65-196349:84", "196349:86-196349:154", "196349:157-196349:244",
"196349:246-196349:258", "196357:1-196357:4", "196359:1-196359:2", "196362:1-196362:88", "196363:1-196363:8",
"196363:11-196363:34", "196364:1-196364:93", "196364:96-196364:136", "196364:139-196364:365", "196364:368-196364:380",
"196364:382-196364:601", "196364:603-196364:795", "196364:798-196364:884", "196364:887-196364:1196", "196364:1199-196364:1200",
"196364:1203-196364:1299", "196437:1", "196437:3-196437:74", "196437:77-196437:169", "196438:1-196438:181",
"196438:184-196438:699", "196438:701-196438:1269", "196452:82-196452:112", "196452:114-196452:490", "196452:493-196452:586",
"196452:589-196452:618", "196452:622-196452:668", "196452:671-196452:716", "196452:718-196452:726", "196452:728-196452:956",
"196452:958-196452:1004", "196452:1007-196452:1091", "196453:1-196453:74", "196453:77-196453:145", "196453:147-196453:669",
"196453:673-196453:714", "196453:717-196453:799", "196453:802-196453:988", "196453:991-196453:1178", "196453:1180",
"196453:1182-196453:1248", "196453:1250-196453:1528", "196453:1531-196453:1647", "196495:114-196495:180", "196495:182-196495:272",
"196509:1-196509:68", "196531:62-196531:150", "196531:152-196531:253", "196531:256-196531:285", "196531:288-196531:302",
"196531:305-196531:422", "196531:425-196531:440", "198049:1-198049:11", "198049:14-198049:57", "198050:2-198050:155",
"198063:1-198063:37", "198063:40-198063:72", "198063:74-198063:124", "198063:127-198063:294", "198116:36-198116:52",
"198116:54-198116:55", "198116:58-198116:96", "198116:98-198116:112", "198207:1-198207:97", "198208:1-198208:92",
"198208:94-198208:134", "198208:137-198208:147", "198208:150-198208:209", "198210:1-198210:221", "198212:1-198212:574",
"198213:1-198213:107", "198215:1-198215:12", "198230:1-198230:33", "198230:36-198230:57", "198230:60-198230:235",
"198230:237-198230:324", "198230:326-198230:388", "198230:390-198230:459", "198230:462-198230:625", "198230:627-198230:651",
"198230:653-198230:805", "198230:808-198230:811", "198230:814-198230:948", "198230:950-198230:1090", "198230:1093-198230:1103",
"198230:1106-198230:1332", "198230:1335-198230:1380", "198249:1-198249:7", "198269:3-198269:198", "198271:1-198271:91",
"198271:93-198271:170", "198271:173-198271:299", "198271:301-198271:450", "198271:453-198271:513", "198271:516-198271:616",
"198271:619-198271:628", "198271:631-198271:791", "198271:793-198271:797", "198272:1-198272:185", "198272:188-198272:245",
"198272:248-198272:314", "198272:317-198272:433", "198272:436-198272:444", "198272:454-198272:620", "198346:44-198346:47",
"198372:57-198372:110", "198485:68-198485:109", "198485:112-198485:134", "198485:136-198485:181", "198485:184-198485:239",
"198487:1-198487:145", "198487:147-198487:514", "198487:517-198487:668", "198487:671-198487:733", "198487:736-198487:757",
"198487:760-198487:852", "198487:854-198487:994", "198487:997-198487:1434", "198487:1437-198487:1610", "198522:65-198522:144",
"198522:147-198522:208", "198941:102-198941:189", "198941:191-198941:220", "198941:222-198941:241", "198941:243-198941:249",
"198941:252-198941:284", "198954:108-198954:156", "198954:159-198954:277", "198955:1-198955:45", "198955:47-198955:50",
"198955:53-198955:220", "198955:223-198955:269", "198955:271-198955:284", "198955:286-198955:338", "198955:340-198955:580",
"198955:583-198955:742", "198955:744-198955:910", "198955:913-198955:946", "198955:949-198955:1162", "198955:1165-198955:1169",
"198955:1172-198955:1182", "198955:1185-198955:1188", "198955:1190-198955:1246", "198955:1249-198955:1304", "198955:1306-198955:1467",
"198955:1470-198955:1485", "198955:1487-198955:1552", "198969:58-198969:81", "198969:84-198969:247", "198969:249-198969:323",
"198969:325-198969:365", "198969:367-198969:413", "198969:416-198969:466", "198969:468-198969:643", "198969:646-198969:918",
"198969:920-198969:1011", "198969:1013-198969:1175", "198969:1178-198969:1236", "198969:1239-198969:1253", "199008:75-199008:93",
"199008:95-199008:121", "199008:124-199008:208", "199008:211-199008:331", "199008:333-199008:373", "199008:376-199008:482",
"199008:485-199008:605", "199008:608-199008:644", "199011:1-199011:11", "199011:13-199011:24", "199021:59-199021:88",
"199021:91-199021:128", "199021:130-199021:133", "199021:136-199021:309", "199021:311-199021:333", "199021:335-199021:410",
"199021:414-199021:469", "199021:471-199021:533", "199021:535-199021:563", "199021:565-199021:1223", "199021:1226-199021:1479",
"199021:1481-199021:1494", "199318:65-199318:138", "199319:1-199319:7", "199319:9-199319:223", "199319:226-199319:277",
"199319:280-199319:348", "199319:351-199319:358", "199319:360-199319:422", "199319:424-199319:490", "199319:492-199319:493",
"199319:496-199319:612", "199319:615-199319:642", "199319:645-199319:720", "199319:723-199319:728", "199319:730-199319:731",
"199319:734-199319:741", "199319:744-199319:752", "199319:754-199319:943", "199319:945-199319:997", "199336:1-199336:33",
"199336:36-199336:122", "199336:125-199336:231", "199336:234-199336:614", "199336:617-199336:789", "199336:791-199336:977",
"199356:95-199356:121", "199356:123-199356:168", "199356:171-199356:205", "199356:208-199356:231", "199409:25-199409:54",
"199409:56-199409:89", "199409:91-199409:204", "199409:206-199409:290", "199409:293-199409:583", "199409:586-199409:602",
"199409:604-199409:1014", "199409:1016-199409:1300", "199428:61-199428:197", "199428:200-199428:210", "199428:212-199428:382",
"199428:387-199428:414", "199428:417-199428:436", "199428:439-199428:530", "199428:533-199428:648", "199429:1-199429:28",
"199429:30-199429:36", "199429:39-199429:55", "199429:58-199429:101", "199429:103-199429:148", "199429:151-199429:154",
"199435:63-199435:106", "199435:109-199435:261", "199435:263-199435:579", "199435:582-199435:654", "199435:656-199435:696",
"199435:699-199435:1034", "199435:1037-199435:1144", "199435:1147-199435:1327", "199435:1330-199435:1411", "199435:1414-199435:1431",
"199435:1434-199435:1441", "199435:1444-199435:1487", "199435:1489-199435:1610", "199436:1-199436:113", "199436:116-199436:254",
"199436:257-199436:675", "199436:678-199436:748", "199564:1-199564:3", "199569:1-199569:2", "199569:5-199569:136",
"199569:139-199569:367", "199570:1-199570:17", "199571:1-199571:184", "199571:186-199571:360", "199571:363-199571:561",
"199572:1-199572:317", "199573:1-199573:22", "199574:1-199574:53", "199574:56-199574:153", "199574:156-199574:246",
"199608:60-199608:157", "199608:159-199608:209", "199608:211-199608:341", "199608:344-199608:390", "199608:392-199608:461",
"199608:464-199608:800", "199608:802-199608:1064", "199608:1067-199608:1392", "199608:1395-199608:1630", "199608:1633-199608:1904",
"199608:1907-199608:1962", "199608:1965-199608:2252", "199608:2255-199608:2422", "199698:72-199698:94", "199698:96-199698:127",
"199699:1-199699:154", "199699:157-199699:169", "199699:172-199699:410", "199699:412-199699:756", "199703:1-199703:94",
"199703:97-199703:482", "199703:485-199703:529", "199739:66-199739:133", "199751:103-199751:119", "199751:121-199751:127",
"199752:1-199752:141", "199752:144-199752:180", "199752:182-199752:186", "199752:188-199752:211", "199752:214-199752:322",
"199753:1-199753:59", "199754:1-199754:203", "199754:205-199754:325", "199754:328-199754:457", "199754:459-199754:607",
"199754:610-199754:613", "199754:615-199754:806", "199754:808-199754:998", "199804:78-199804:88", "199804:90-199804:181",
"199804:183-199804:235", "199804:238-199804:278", "199804:281-199804:290", "199804:292-199804:519", "199804:522-199804:575",
"199804:577-199804:628", "199804:631-199804:632", "199812:70-199812:141", "199812:144-199812:163", "199812:182-199812:211",
"199812:214-199812:471", "199812:474-199812:505", "199812:508-199812:557", "199812:560-199812:571", "199812:574-199812:623",
"199812:626-199812:751", "199812:754-199812:796", "199832:58-199832:62", "199832:65-199832:118", "199832:121-199832:139",
"199832:142-199832:286", "199833:1-199833:13", "199833:16-199833:103", "199833:105-199833:250", "199833:253-199833:493",
"199833:496-199833:794", "199833:797-199833:1032", "199833:1034-199833:1185", "199833:1188-199833:1239", "199834:1-199834:9",
"199834:11", "199834:14-199834:18", "199834:21-199834:54", "199834:56-199834:57", "199834:62-199834:65",
"199834:69-199834:284", "199834:286-199834:503", "199834:505-199834:942", "199862:59-199862:141", "199864:1-199864:87",
"199864:89", "199864:92-199864:103", "199864:106-199864:372", "199864:374-199864:385", "199864:388-199864:486",
"199867:1-199867:134", "199867:136-199867:172", "199867:174-199867:218", "199867:221-199867:320", "199868:1-199868:21",
"199875:70-199875:150", "199875:152-199875:334", "199876:1-199876:19", "199876:22-199876:95", "199876:97-199876:249",
"199876:252-199876:272", "199876:274-199876:340", "199876:343-199876:362", "199876:365-199876:376", "199877:1-199877:173",
"199877:175-199877:605", "199877:607-199877:701", "199877:703-199877:871", "199960:72-199960:139", "199960:141-199960:197",
"199960:204-199960:232", "199960:235-199960:363", "199960:365-199960:367", "199960:370-199960:380", "199960:383-199960:459",
"199960:461-199960:466", "199960:469-199960:485", "199961:1-199961:211", "199961:213-199961:287", "199967:60-199967:120",
"199967:122-199967:170", "199967:172-199967:198", "199973:73-199973:89", "200041:62-200041:83", "200041:85-200041:157",
"200041:162-200041:274", "200041:277-200041:318", "200041:321-200041:335", "200041:337-200041:386", "200041:388-200041:389",
"200041:392-200041:400", "200041:402-200041:568", "200041:571-200041:593", "200041:595-200041:646", "200041:649-200041:728",
"200041:731-200041:860", "200041:862-200041:930", "200041:932-200041:1096", "200042:1-200042:110", "200042:112-200042:536",
"200049:1-200049:177", "200075:76-200075:139", "200075:142-200075:232", "200075:256-200075:326", "200075:329-200075:422",
"200075:425-200075:431", "200075:434-200075:500", "200075:502-200075:605", "200091:67", "200091:70-200091:151",
"200091:154-200091:172", "200091:174-200091:187", "200091:190-200091:196", "200091:199-200091:201", "200091:204-200091:425",
"200091:428-200091:535", "200091:537-200091:607", "200091:610-200091:879", "200091:881-200091:943", "200091:946-200091:999",
"200091:1001-200091:1025", "200091:1027-200091:1132", "200091:1135-200091:1339", "200091:1341-200091:1433", "200091:1435-200091:1450",
"200091:1453-200091:1523", "200091:1526-200091:1664", "200091:1667-200091:1680", "200091:1683-200091:1710", "200152:74-200152:116",
"200160:52-200160:68", "200161:1-200161:97", "200161:100-200161:112", "200174:81-200174:84", "200177:1-200177:56",
"200178:1-200178:38", "200180:1-200180:18", "200186:1-200186:3", "200186:6-200186:24", "200188:1-200188:24",
"200188:27-200188:28", "200188:31-200188:76", "200188:79-200188:271", "200188:274-200188:352", "200190:1-200190:4",
"200190:6-200190:76", "200190:79-200190:143", "200190:146-200190:159", "200190:162-200190:256", "200190:258-200190:321",
"200190:324-200190:401", "200190:403-200190:453", "200190:456-200190:457", "200190:460-200190:565", "200190:567-200190:588",
"200190:591", "200190:593-200190:595", "200190:597-200190:646", "200190:649-200190:878", "200229:1-200229:33",
"200229:41-200229:219", "200229:222-200229:244", "200229:247-200229:290", "200229:293-200229:624", "200229:627-200229:629",
"200243:69-200243:103", "200243:106-200243:139", "200244:3-200244:304", "200244:307-200244:442", "200244:445-200244:507",
"200244:510-200244:619", "200245:1-200245:103", "200245:105-200245:128", "200245:131-200245:248", "200245:251-200245:357",
"200368:72-200368:180", "200369:1-200369:5", "200369:8-200369:61", "200369:64-200369:360", "200369:363-200369:439",
"200369:441-200369:578", "200369:580-200369:603", "200369:606-200369:684", "200369:686", "200381:8-200381:15",
"200381:18-200381:36", "200381:38-200381:89", "200381:91-200381:195", "200466:134-200466:274", "200473:96-200473:157",
"200473:159-200473:224", "200473:226-200473:304", "200473:306-200473:469", "200473:472-200473:524", "200473:527-200473:542",
"200473:545-200473:619", "200473:622-200473:688", "200473:691-200473:730", "200473:733-200473:738", "200473:740-200473:1324",
"200491:87-200491:107", "200491:110-200491:149", "200491:152-200491:157", "200491:160-200491:197", "200491:199-200491:237",
"200491:240-200491:270", "200491:273", "200491:276-200491:334", "200491:336-200491:360", "200491:363-200491:419",
"200515:97-200515:183", "200519:1-200519:111", "200519:114-200519:126", "200519:129-200519:136", "200519:138-200519:224",
"200519:227-200519:258", "200519:261-200519:350", "200519:353-200519:611", "200519:613-200519:747", "200525:77-200525:149",
"200525:151-200525:164", "200525:166-200525:190", "200525:193-200525:276", "200525:278-200525:311", "200525:314-200525:464",
"200525:467-200525:488", "200525:491-200525:674", "200525:676-200525:704", "200525:707-200525:755", "200525:757-200525:895",
"200525:898-200525:937", "200525:939-200525:990", "200532:1-200532:37", "200599:75-200599:129", "200599:132-200599:137",
"200600:1-200600:183", "200600:186-200600:299", "200600:302-200600:313", "200600:316-200600:324", "200600:327-200600:334",
"200600:336-200600:397", "200600:399-200600:417", "200600:420-200600:526", "200600:529-200600:591", "200600:594-200600:596",
"200600:598-200600:609", "200600:611-200600:660", "200600:663-200600:823", "200600:826-200600:900", "200600:902-200600:943",
"200600:945-200600:1139", "200961:1-200961:115", "200976:94-200976:164", "200990:75-200990:143", "200991:1-200991:42",
"200991:44", "200991:47-200991:80", "200991:83-200991:175", "200991:178-200991:181", "200991:184-200991:252",
"200991:255-200991:632", "200991:635-200991:916", "200991:918-200991:1017", "200991:1019-200991:1048", "200992:1-200992:405",
"200992:408-200992:434", "200992:436-200992:581", "201062:78-201062:268", "201097:83-201097:136", "201097:138-201097:245",
"201097:248-201097:300", "201097:303-201097:370", "201097:372-201097:429", "201097:432-201097:497", "201114:1-201114:14",
"201115:1-201115:73", "201159:70-201159:211", "201164:1-201164:8", "201164:10-201164:94", "201164:96-201164:125",
"201164:128-201164:178", "201164:180-201164:198", "201164:200-201164:271", "201164:274-201164:416", "201164:418",
"201168:1-201168:37", "201168:39-201168:275", "201168:278-201168:481", "201168:483-201168:558", "201168:560-201168:730",
"201173:1-201173:194", "201173:197-201173:586", "201174:1-201174:214", "201174:216-201174:263", "201174:265-201174:339",
"201174:342-201174:451", "201191:75-201191:98", "201191:100-201191:216", "201191:218-201191:389", "201191:392-201191:492",
"201191:494-201191:506", "201191:509-201191:585", "201191:587-201191:594", "201191:597-201191:607", "201191:609-201191:794",
"201191:796-201191:838", "201191:841-201191:974", "201191:977-201191:1105", "201191:1108-201191:1117", "201191:1120-201191:1382",
"201191:1385-201191:1386", "201193:1-201193:19", "201196:1-201196:238", "201196:241-201196:278", "201196:286-201196:299",
"201196:302-201196:338", "201196:341-201196:515", "201196:518-201196:720", "201196:723-201196:789", "201196:803-201196:841",
"201197:1-201197:23", "201202:1-201202:437", "201229:1-201229:5", "201229:8-201229:26", "201229:29-201229:73",
"201278:62-201278:163", "201278:166-201278:229", "201278:232-201278:256", "201278:259-201278:316", "201278:318-201278:595",
"201278:598-201278:938", "201278:942-201278:974", "201278:976-201278:1160", "201278:1163-201278:1304", "201278:1306-201278:1793",
"201278:1796-201278:1802", "201278:1805-201278:1906", "201278:1909-201278:1929", "201278:1932-201278:2174", "201554:70-201554:86",
"201554:88-201554:114", "201554:116-201554:126", "201602:76-201602:81", "201602:83-201602:194", "201602:196-201602:494",
"201602:496-201602:614", "201602:617-201602:635", "201611:87-201611:145", "201611:149-201611:182", "201611:184-201611:186",
"201613:1-201613:42", "201613:44-201613:49", "201613:53-201613:210", "201613:213-201613:215", "201613:218-201613:225",
"201613:228-201613:646", "201624:83-201624:92", "201624:95-201624:240", "201624:270", "201625:211-201625:312",
"201625:315-201625:348", "201625:351-201625:416", "201625:418-201625:588", "201625:591-201625:671", "201625:673-201625:758",
"201625:760-201625:791", "201625:793-201625:944", "201657:77-201657:93", "201657:95-201657:108", "201657:110-201657:118",
"201658:1-201658:19", "201658:21-201658:118", "201658:121-201658:136", "201658:139-201658:288", "201668:78-201668:157",
"201669:1-201669:9", "201669:12-201669:136", "201669:139-201669:141", "201669:143-201669:165", "201671:1-201671:120",
"201671:122-201671:174", "201671:177-201671:462", "201671:464-201671:482", "201671:485-201671:499", "201671:501-201671:545",
"201671:547-201671:571", "201671:574-201671:614", "201671:617-201671:766", "201671:768-201671:896", "201671:899-201671:911",
"201671:914-201671:1007", "201678:1-201678:120", "201679:1-201679:110", "201679:112-201679:241", "201679:244-201679:298",
"201679:302-201679:321", "201679:324-201679:461", "201679:463-201679:483", "201692:78-201692:81", "201692:83-201692:179",
"201705:65-201705:73", "201705:75-201705:109", "201705:111-201705:187", "201706:1-201706:62", "201707:1-201707:23",
"201707:26-201707:42", "201707:45-201707:115", "201707:118-201707:130", "201707:133-201707:160", "201707:163-201707:276",
"201707:279-201707:471", "201707:473-201707:511", "201707:514-201707:545", "201707:547-201707:570", "201707:572-201707:622",
"201707:625-201707:735", "201707:738-201707:806", "201707:809-201707:876", "201707:879-201707:964", "201708:1-201708:79",
"201718:58-201718:108", "201727:67-201727:185", "201729:6-201729:20", "201729:22-201729:75", "201729:77-201729:126",
"201729:129-201729:154", "201729:156-201729:216", "201729:219-201729:244", "201794:58-201794:94", "201802:68-201802:209",
"201802:211-201802:214", "201802:216-201802:220", "201802:223-201802:288", "201802:290-201802:296", "201816:1-201816:72",
"201816:74-201816:105", "201816:107-201816:157", "201817:1-201817:274", "201818:1", "201819:1-201819:94",
"201819:96-201819:241", "201824:1-201824:139", "201824:141-201824:176", "201824:179-201824:286", "201824:289-201824:492",
"202012:98-202012:121", "202012:126-202012:131", "202013:1-202013:2", "202013:5-202013:35", "202013:38-202013:57",
"202014:1-202014:5", "202014:8-202014:14", "202014:16-202014:18", "202014:20-202014:77", "202014:79-202014:102",
"202014:104-202014:174", "202014:177-202014:190", "202014:192-202014:196", "202016:1-202016:48", "202016:51-202016:134",
"202016:137-202016:177", "202016:179-202016:743", "202016:745-202016:831", "202016:834-202016:890", "202016:893-202016:896",
"202016:898-202016:932", "202016:934-202016:1010", "202044:84-202044:101", "202044:104-202044:266", "202044:268-202044:461",
"202044:463-202044:466", "202045:1-202045:30", "202045:33-202045:72", "202045:75-202045:528", "202045:531-202045:601",
"202045:603-202045:785", "202045:788-202045:809", "202045:822-202045:823", "202054:6-202054:266", "202054:268-202054:489",
"202054:492-202054:605", "202054:608-202054:631", "202060:76-202060:142", "202060:144-202060:154", "202060:156-202060:244",
"202060:246-202060:497", "202060:499-202060:642", "202060:644-202060:682", "202060:684-202060:743", "202060:746-202060:936",
"202074:66-202074:174", "202075:1-202075:18", "202075:21-202075:187", "202075:189-202075:214", "202075:217-202075:247",
"202075:250-202075:342", "202075:345-202075:406", "202075:409-202075:497", "202075:500-202075:537", "202075:539",
"202075:542-202075:560", "202075:562-202075:615", "202075:618-202075:628", "202084:83-202084:156", "202084:159-202084:177",
"202084:179-202084:180", "202084:182-202084:239", "202087:1-202087:25", "202087:28-202087:208", "202087:210-202087:357",
"202087:359-202087:652", "202087:655-202087:853", "202087:856-202087:1093", "202088:1-202088:286", "202093:1-202093:104",
"202093:107-202093:320", "202093:322-202093:360", "202116:59-202116:60", "202178:67-202178:78", "202178:80-202178:88",
"202178:91-202178:177", "202178:180-202178:186", "202178:188-202178:337", "202178:340-202178:377", "202178:379-202178:425",
"202178:428-202178:475", "202178:478-202178:548", "202178:551-202178:717", "202178:720-202178:965", "202178:967-202178:1444",
"202178:1447-202178:1505", "202178:1508-202178:1519", "202178:1522-202178:1555", "202205:94-202205:114", "202209:1-202209:48",
"202209:51-202209:142", "202237:39-202237:128", "202237:131", "202237:134-202237:219", "202237:222-202237:235",
"202237:238-202237:275", "202237:277-202237:289", "202237:291-202237:316", "202237:319-202237:419", "202237:422-202237:538",
"202237:540-202237:936", "202237:939-202237:950", "202237:952-202237:976", "202237:979-202237:1079", "202272:76-202272:112",
"202272:115-202272:141", "202272:144-202272:185", "202272:188-202272:205", "202272:208-202272:305", "202272:307-202272:313",
"202272:315-202272:371", "202272:436-202272:480", "202272:483-202272:555", "202272:558-202272:577", "202272:579-202272:683",
"202272:686-202272:705", "202272:707-202272:740", "202272:742-202272:890", "202272:937-202272:1295", "202272:1299-202272:1481",
"202299:68-202299:84", "202299:87-202299:141", "202299:143-202299:193", "202299:196-202299:358", "202299:361-202299:379",
"202299:382-202299:414", "202299:416-202299:452", "202299:455-202299:555", "202305:1-202305:89", "202305:92-202305:130",
"202305:133-202305:323", "202314:67-202314:104", "202314:107-202314:265", "202314:268-202314:278", "202328:46-202328:89",
"202328:92-202328:156", "202328:158-202328:276", "202328:278-202328:291", "202328:294-202328:434", "202328:437-202328:460",
"202328:463-202328:586", "202328:588-202328:610", "202328:612-202328:614", "202333:1-202333:235", "202389:81-202389:182",
"202389:185-202389:190", "202389:192-202389:199", "202469:87-202469:158", "202469:160-202469:174", "202469:177-202469:352",
"202472:1-202472:96", "202472:99-202472:112", "202477:1-202477:129", "202477:131-202477:150", "202478:1-202478:177",
"202478:180-202478:183", "202478:186-202478:219", "202478:222-202478:360", "202478:362-202478:506", "202478:509-202478:531",
"202478:534-202478:718", "202478:720-202478:927", "202478:929-202478:973", "202478:975-202478:1029", "202478:1031-202478:1186",
"202478:1189-202478:1212", "202478:1215-202478:1248", "202504:77-202504:96", "202504:99-202504:133", "202504:135-202504:182",
"202504:184-202504:211", "202504:213-202504:241", "202504:243-202504:392", "202504:395-202504:527", "202504:529-202504:617",
"202504:620-202504:715", "202504:718-202504:763", "202504:766-202504:1172", "202504:1174-202504:1247", "202504:1250-202504:1471",
"202504:1474-202504:1679", "202504:1682-202504:1704", "202972:1-202972:30", "202972:33-202972:184", "202972:186-202972:290",
"202972:292-202972:295", "202972:298-202972:371", "202972:374-202972:429", "202972:431-202972:544", "202973:1-202973:234",
"202973:237-202973:305", "202973:308-202973:437", "202973:439-202973:530", "202973:532-202973:541", "202973:544-202973:552",
"202973:555-202973:851", "202973:853-202973:1408", "203002:77-203002:128", "203002:130-203002:141", "203002:144-203002:207",
"203002:209-203002:267", "203002:270-203002:360", "203002:362-203002:501", "203002:504-203002:641", "203002:643-203002:669",
"203002:671", "203002:674-203002:717", "203002:720-203002:1034", "203002:1037-203002:1070", "203002:1073-203002:1370",
"203002:1372-203002:1392", "203002:1395-203002:1410", "203002:1413-203002:1596", "203709:1-203709:121", "203742:1-203742:29",
"203777:103-203777:113", "203830:82-203830:182", "203832:1-203832:11", "203833:1-203833:70", "203833:73-203833:128",
"203834:1-203834:40", "203835:1-203835:70", "203835:73-203835:358", "203853:122-203853:222", "203894:82-203894:272",
"203894:275-203894:477", "203894:480-203894:902", "203894:905-203894:1319", "203909:79-203909:113", "203909:116-203909:117",
"203909:120-203909:140", "203909:143-203909:382", "203912:1-203912:306", "203912:308-203912:566", "203912:569-203912:609",
"203912:611-203912:698", "203912:701-203912:820", "203912:823-203912:865", "203912:867-203912:1033", "203912:1035-203912:1321",
"203987:1-203987:9", "203987:12-203987:241", "203987:243-203987:339", "203987:342-203987:781", "203987:784-203987:1014",
"203992:1-203992:15", "203994:1-203994:56", "203994:59-203994:136", "203994:139-203994:304", "203994:306-203994:342",
"203994:344-203994:425", "204100:117-204100:139", "204101:1-204101:74", "204113:82-204113:96", "204113:98-204113:102",
"204113:105-204113:127", "204113:129-204113:191", "204113:194-204113:258", "204113:261-204113:327", "204113:329-204113:388",
"204113:390-204113:400", "204113:402-204113:583", "204113:585-204113:690", "204114:1-204114:358", "204238:23-204238:52",
"204238:55", "204250:92-204250:118", "204250:121-204250:177", "204250:179-204250:285", "204250:287-204250:336",
"204250:339-204250:400", "204250:403-204250:521", "204250:524-204250:543", "204250:546-204250:682", "204250:684-204250:801",
"204511:1-204511:56", "204541:5-204541:39", "204541:42", "204541:44-204541:139", "204541:142-204541:149",
"204541:151-204541:204", "204544:1-204544:11", "204544:13-204544:93", "204544:96-204544:195", "204544:197-204544:224",
"204544:226-204544:334", "204544:337-204544:426", "204552:1-204552:9", "204553:1-204553:51", "204553:53-204553:60",
"204553:63-204553:101", "204554:1-204554:5", "204554:7-204554:221", "204554:224-204554:455", "204554:458-204554:470",
"204554:472-204554:481", "204554:483-204554:514", "204555:1-204555:329", "204555:331-204555:334", "204563:91-204563:99",
"204563:102-204563:178", "204563:180-204563:219", "204563:222-204563:229", "204563:231-204563:364", "204563:366",
"204563:369-204563:470", "204563:473-204563:524", "204563:527-204563:571", "204564:1-204564:84", "204564:87-204564:89",
"204564:92-204564:159", "204564:161-204564:187", "204564:190-204564:191", "204564:193-204564:293", "204564:296-204564:315",
"204564:317-204564:340", "204564:343-204564:427", "204564:429-204564:434", "204564:437-204564:735", "204564:737-204564:855",
"204564:858-204564:1206", "204564:1209-204564:1248", "204564:1251-204564:1284", "204565:1-204565:48", "204566:1-204566:12",
"204567:1-204567:38", "204576:49-204576:192", "204576:195-204576:301", "204577:1-204577:46", "204577:49-204577:64",
"204577:67-204577:105", "204577:107-204577:170", "204577:173-204577:181", "204577:183-204577:193", "204577:196-204577:653",
"204577:656-204577:669", "204577:671-204577:740", "204577:742-204577:913", "204577:915-204577:1057", "204577:1059-204577:1115",
"204577:1117-204577:1282", "204599:73-204599:83", "204599:85-204599:94", "204599:97-204599:121", "204599:124-204599:125",
"204599:128-204599:173", "204599:175-204599:240", "204599:243-204599:245", "204599:248-204599:264", "204599:266-204599:292",
"204599:294-204599:334", "204601:1-204601:25", "204601:28-204601:62", "204601:65-204601:80", "204601:83-204601:89",
"204601:92-204601:290", "204601:292-204601:563", "204601:565-204601:591", "204601:593-204601:652", "204601:655-204601:780",
"204601:783-204601:812", "204601:814-204601:892", "204601:894-204601:984", "204601:986-204601:1003", "204601:1006-204601:1038",
"204601:1040-204601:1088", "204601:1091-204601:1102", "204601:1105-204601:1161", "204601:1164-204601:1250", "205086:95-205086:149",
"205111:88-205111:390", "205111:392-205111:441", "205111:444-205111:446", "205158:81-205158:289", "205158:292-205158:313",
"205158:315-205158:473", "205158:476-205158:591", "205158:594-205158:595", "205158:597-205158:612", "205158:615-205158:663",
"205158:665-205158:667", "205158:672-205158:685", "205158:687-205158:733", "205193:80-205193:109", "205193:111-205193:349",
"205193:352-205193:486", "205193:488-205193:650", "205193:652-205193:712", "205193:714-205193:902", "205217:1-205217:12",
"205217:16-205217:111", "205217:113-205217:171", "205217:174-205217:250", "205217:253-205217:318", "205233:94-205233:153",
"205236:1-205236:190", "205236:193-205236:207", "205236:209-205236:260", "205236:263-205236:331", "205236:334-205236:352",
"205238:1-205238:6", "205238:9-205238:199", "205238:202-205238:254", "205238:256-205238:304", "205238:306-205238:355",
"205238:358-205238:381", "205238:384-205238:596", "205238:598-205238:617", "205303:35-205303:54", "205303:90-205303:132",
"205303:135-205303:144", "205310:76-205310:306", "205310:309-205310:313", "205310:316", "205310:319-205310:321",
"205310:324-205310:457", "205310:460-205310:559", "205311:1-205311:85", "205311:88-205311:92", "205311:95-205311:183",
"205311:186-205311:395", "205311:397-205311:592", "205311:595-205311:910", "205311:913-205311:1260", "205339:71-205339:175",
"205339:178-205339:213", "205339:216-205339:230", "205339:233-205339:262", "205339:265-205339:404", "205344:1-205344:83",
"205344:86-205344:104", "205344:106-205344:359", "205344:362-205344:431", "205344:433-205344:949", "205344:951-205344:967",
"205344:969-205344:1127", "205344:1129-205344:1346", "205344:1348-205344:1586", "205515:82-205515:201", "205515:203-205515:216",
"205519:1-205519:47", "205519:50-205519:172", "205519:175-205519:367", "205519:370-205519:386", "205519:389-205519:472",
"205526:1-205526:269", "205526:272-205526:277", "205526:280-205526:332", "205614:1-205614:4", "205614:7-205614:40",
"205617:1-205617:29", "205617:32-205617:102", "205617:105-205617:123", "205617:125-205617:140", "205617:143-205617:264",
"205617:266-205617:448", "205617:451-205617:532", "205617:534-205617:547", "205618:1-205618:12", "205620:1-205620:175",
"205666:60-205666:119", "205666:122-205666:165", "205666:168-205666:259", "205666:261-205666:322", "205666:325-205666:578",
"205666:580-205666:594", "205666:597-205666:721", "205666:724-205666:739", "205667:1-205667:165", "205667:168-205667:282",
"205667:285-205667:318", "205667:321-205667:412", "205667:415-205667:689", "205667:692-205667:751", "205667:754-205667:774",
"205667:777-205667:1109", "205683:76-205683:82", "205683:85-205683:178", "205683:181-205683:198", "205683:201-205683:305",
"205690:1-205690:40", "205694:1-205694:205", "205694:208-205694:230", "205694:233-205694:347", "205694:350-205694:452",
"205694:455-205694:593", "205694:595-205694:890", "205718:49-205718:75", "205718:78-205718:97", "205718:100-205718:103",
"205718:105-205718:176", "205718:178-205718:338", "205718:341-205718:361", "205718:363-205718:524", "205718:527-205718:531",
"205718:534-205718:589", "205718:591-205718:694", "205774:1-205774:80", "205777:1-205777:8", "205781:1-205781:89",
"205781:91-205781:197", "205781:200-205781:502", "205826:80-205826:232", "205826:235-205826:303", "205826:306-205826:468",
"205833:84-205833:86", "205833:89-205833:121", "205833:123-205833:155", "205833:157-205833:165", "205833:167-205833:173",
"205833:176-205833:219", "205833:221-205833:267", "205833:270-205833:312", "205833:315-205833:346", "205833:350-205833:355",
"205833:360-205833:366", "205834:1-205834:12", "205834:14-205834:195", "205908:68-205908:200", "205908:202-205908:209",
"205921:22-205921:73", "205921:76-205921:268", "205921:271-205921:394", "205921:397-205921:401", "205921:410-205921:428",
"205921:431-205921:498", "205921:500-205921:571", "205921:574-205921:779", "205921:782-205921:853", "206066:89-206066:146",
"206088:86-206088:159", "206088:161-206088:178", "206088:181-206088:199", "206088:202-206088:286", "206102:83-206102:116",
"206102:120-206102:130", "206102:133-206102:208", "206102:211-206102:235", "206102:238-206102:246", "206102:249-206102:278",
"206102:281-206102:349", "206187:107-206187:169", "206187:172-206187:242", "206187:245-206187:288", "206187:290-206187:340",
"206187:343-206187:427", "206187:429-206187:435", "206187:437-206187:486", "206187:489-206187:569", "206187:571-206187:647",
"206187:649-206187:662", "206187:664-206187:708", "206188:1-206188:40", "206188:42-206188:55", "206199:1-206199:75",
"206199:77-206199:82", "206199:85-206199:114", "206207:82-206207:130", "206207:132-206207:176", "206207:179-206207:194",
"206207:196-206207:388", "206207:390-206207:419", "206207:422-206207:447", "206207:450-206207:569", "206207:572-206207:690",
"206208:1-206208:470", "206208:472-206208:518", "206210:11-206210:25", "206210:28-206210:275", "206210:277-206210:298",
"206210:300-206210:383", "206210:386-206210:466", "206243:62-206243:169", "206243:172-206243:196", "206243:199-206243:354",
"206243:357-206243:433", "206243:435-206243:448", "206243:451-206243:533", "206243:536-206243:554", "206243:557-206243:723",
"206243:726-206243:905", "206245:1-206245:62", "206246:1-206246:14", "206246:16-206246:237", "206246:240-206246:285",
"206246:288-206246:407", "206246:412-206246:676", "206246:678-206246:704", "206246:706-206246:785", "206246:787-206246:962",
"206246:965-206246:997", "206246:1000-206246:1198", "206246:1201-206246:1290", "206257:1-206257:29", "206258:1-206258:36",
"206258:39-206258:223", "206258:226-206258:249", "206302:1-206302:8", "206302:11-206302:33", "206302:36-206302:44",
"206302:47-206302:82", "206302:84-206302:108", "206302:110-206302:149", "206302:151-206302:186", "206302:189-206302:229",
"206302:231-206302:232", "206302:234-206302:241", "206302:243-206302:276", "206303:1-206303:19", "206303:23-206303:286",
"206304:1-206304:4", "206304:6-206304:62", "206331:91-206331:222", "206331:225-206331:312", "206389:88-206389:185",
"206389:187-206389:249", "206389:252-206389:272", "206389:275-206389:392", "206391:1-206391:55", "206391:57-206391:91",
"206401:69-206401:90", "206401:92-206401:194", "206401:197-206401:210", "206401:212-206401:249", "206401:251-206401:265",
"206401:267-206401:409", "206446:92-206446:141", "206446:143-206446:159", "206446:162-206446:205", "206446:208-206446:301",
"206446:304-206446:442", "206446:445", "206446:448-206446:474", "206446:476-206446:616", "206446:619-206446:872",
"206446:874-206446:910", "206446:912-206446:948", "206446:950-206446:989", "206446:992-206446:1030", "206446:1033-206446:1075",
"206446:1109-206446:1149", "206448:1-206448:143", "206448:145-206448:559", "206448:561-206448:1170", "206448:1173-206448:1231",
"206448:1235-206448:1237", "206466:24-206466:137", "206466:140-206466:277", "206466:280-206466:296", "206466:299-206466:303",
"206466:306-206466:405", "206466:407-206466:419", "206466:422-206466:477", "206466:480-206466:511", "206466:514-206466:676",
"206476:73-206476:129", "206476:133-206476:137", "206476:140-206476:141", "206476:143-206476:219", "206477:1-206477:14",
"206477:16-206477:31", "206477:33-206477:41", "206477:44-206477:51", "206477:53-206477:70", "206477:73-206477:75",
"206477:77-206477:89", "206477:91-206477:94", "206477:97-206477:115", "206477:118-206477:184", "206478:1-206478:27",
"206478:29-206478:136", "206478:139-206478:144", "206484:73-206484:95", "206484:98-206484:133", "206484:136-206484:163",
"206484:166-206484:186", "206484:189-206484:384", "206484:387-206484:463", "206484:465-206484:551", "206484:554",
"206484:556-206484:669", "206512:91-206512:123", "206512:125-206512:133", "206512:136-206512:161", "206512:163-206512:190",
"206512:193-206512:201", "206512:203-206512:212", "206512:214-206512:332", "206512:334-206512:584", "206512:587-206512:604",
"206512:607-206512:1005", "206512:1008-206512:1123", "206512:1126-206512:1163", "206512:1165-206512:1211", "206513:3-206513:39",
"206513:42-206513:188", "206513:191-206513:234", "206513:237-206513:238", "206513:241-206513:323", "206542:1-206542:115",
"206542:117-206542:165", "206542:168-206542:511", "206542:514-206542:547", "206542:550-206542:603", "206542:606-206542:668",
"206542:671-206542:727", "206542:730-206542:739", "206542:741-206542:833", "206550:77-206550:132", "206550:135-206550:144",
"206572:37-206572:47", "206573:2-206573:14", "206574:1-206574:87", "206575:1-206575:7", "206575:10",
"206575:12-206575:69", "206594:72-206594:107", "206594:110-206594:246", "206594:249-206594:281", "206595:1-206595:34",
"206595:37-206595:42", "206595:45-206595:193", "206596:1-206596:13", "206596:15-206596:220", "206596:222-206596:228",
"206596:231-206596:236", "206596:239-206596:292", "206596:295-206596:695", "206596:697-206596:728", "206596:730-206596:810",
"206598:1-206598:81", "206598:83-206598:103", "206598:105-206598:588", "206598:591-206598:657", "206598:659-206598:719",
"206605:1-206605:36", "206605:39-206605:78", "206744:49-206744:157", "206744:160-206744:192", "206744:195-206744:395",
"206744:398-206744:452", "206745:1-206745:81", "206745:84-206745:199", "206745:202-206745:224", "206745:227-206745:237",
"206745:240-206745:304", "206745:306-206745:318", "206745:321-206745:720", "206745:723-206745:796", "206745:799-206745:894",
"206745:897-206745:944", "206745:946-206745:1106", "206745:1108-206745:1524", "206745:1527-206745:1862", "206745:1988-206745:1996",
"206859:79-206859:210", "206859:212-206859:258", "206859:260-206859:323", "206859:325-206859:356", "206859:359-206859:609",
"206859:612-206859:681", "206859:684-206859:732", "206859:734-206859:768", "206859:771-206859:808", "206859:811-206859:827",
"206859:830-206859:848", "206866:1-206866:30", "206866:33-206866:113", "206866:115-206866:274", "206868:1-206868:3",
"206868:10-206868:16", "206869:1-206869:251", "206869:253-206869:271", "206869:274-206869:502", "206869:507-206869:520",
"206869:522-206869:566", "206869:568-206869:752", "206897:1-206897:34", "206897:38-206897:61", "206897:63-206897:102",
"206897:109", "206897:111-206897:112", "206897:114-206897:131", "206897:133-206897:137", "206901:1-206901:98",
"206906:1-206906:31", "206906:38-206906:94", "206906:96-206906:136", "206906:138-206906:139", "206906:142-206906:149",
"206906:151-206906:175", "206906:177-206906:206", "206940:1-206940:151", "206940:153", "206940:155-206940:298",
"206940:301-206940:382", "206940:384-206940:712", "206940:715-206940:803", "206940:805-206940:960", "206940:963-206940:1027",
"207099:83-207099:134", "207099:137-207099:172", "207099:175-207099:213", "207099:216-207099:314", "207099:316-207099:320",
"207099:323-207099:330", "207099:333-207099:367", "207099:370-207099:481", "207099:484-207099:602", "207099:605-207099:755",
"207099:757-207099:1046", "207099:1048-207099:1171", "207100:1-207100:91", "207100:94", "207214:57-207214:112",
"207214:114-207214:177", "207214:179-207214:181", "207214:184-207214:196", "207214:199-207214:220", "207214:223-207214:262",
"207214:265-207214:405", "207214:408-207214:482", "207214:485-207214:640", "207214:643-207214:708", "207214:718-207214:757",
"207214:759-207214:808", "207214:811-207214:829", "207217:1-207217:32", "207219:1-207219:112", "207220:1-207220:160",
"207221:1-207221:102", "207222:1-207222:17", "207222:20-207222:289", "207231:70-207231:84", "207231:86-207231:121",
"207231:123-207231:184", "207231:187-207231:189", "207231:192-207231:303", "207231:306-207231:354", "207231:357-207231:481",
"207231:484-207231:504", "207231:508-207231:549", "207231:552-207231:626", "207231:628-207231:690", "207231:693-207231:875",
"207231:878-207231:1000", "207231:1003-207231:1170", "207231:1173-207231:1187", "207231:1189-207231:1227", "207231:1229-207231:1415",
"207231:1418-207231:1445", "207231:1447-207231:1505", "207233:1-207233:119", "207233:121-207233:148", "207269:80-207269:394",
"207269:397-207269:436", "207269:439-207269:463", "207269:466-207269:551", "207269:568-207269:577", "207273:3-207273:877",
"207279:68-207279:138", "207279:141-207279:149", "207279:151-207279:237", "207279:240-207279:266", "207279:269-207279:307",
"207279:309-207279:416", "207279:498-207279:551", "207279:554-207279:640", "207279:643-207279:961", "207279:963-207279:1095",
"207279:1098-207279:1160", "207320:1-207320:110", "207320:112-207320:350", "207371:72-207371:117", "207371:120-207371:124",
"207372:1-207372:27", "207372:30-207372:113", "207372:116-207372:154", "207372:156-207372:174", "207372:176-207372:478",
"207372:480-207372:496", "207397:32-207397:77", "207397:80-207397:140", "207397:143-207397:179", "207398:1-207398:14",
"207398:16-207398:33", "207454:79-207454:95", "207454:98-207454:123", "207454:126-207454:259", "207454:261-207454:363",
"207454:365-207454:458", "207454:461-207454:498", "207454:501-207454:609", "207454:612-207454:632", "207454:635-207454:781",
"207454:784-207454:866", "207454:869-207454:974", "207454:977-207454:1064", "207454:1067-207454:1079", "207454:1081-207454:1321",
"207454:1323-207454:1464", "207454:1467-207454:1569", "207454:1571-207454:1604", "207454:1607-207454:1712", "207454:1714-207454:1988",
"207469:1-207469:31", "207469:34-207469:45", "207477:76-207477:104", "207477:107-207477:111", "207477:114-207477:147",
"207477:150-207477:295", "207477:298-207477:483", "207477:486-207477:494", "207477:497-207477:527", "207477:530-207477:563",
"207477:565-207477:570", "207487:50-207487:98", "207487:101-207487:311", "207487:313-207487:359", "207487:363-207487:468",
"207487:471-207487:472", "207488:1-207488:63", "207488:66-207488:92", "207488:95-207488:113", "207488:116-207488:198",
"207488:200-207488:250", "207488:252-207488:288", "207488:291-207488:365", "207488:368-207488:377", "207488:379-207488:440",
"207490:1-207490:48", "207490:51-207490:111", "207491:1-207491:176", "207491:179-207491:458", "207492:1-207492:20",
"207492:23-207492:298", "207515:79-207515:109", "207515:112-207515:132", "207515:134-207515:208", "207515:211-207515:225",
"207515:228-207515:320", "207515:322-207515:381", "207515:383-207515:498", "207515:500-207515:730", "207515:733-207515:849",
"207515:851-207515:954", "207515:957-207515:994", "207515:997-207515:1052", "207515:1055-207515:1143", "207515:1145-207515:1211",
"207517:1-207517:12", "207517:15-207517:57", "207518:1-207518:59", "207518:61-207518:83", "207882:22-207882:45",
"207883:1", "207883:3-207883:4", "207883:7-207883:75", "207884:1-207884:106", "207884:108-207884:183",
"207885:1-207885:90", "207886:1-207886:30", "207886:32-207886:90", "207886:92-207886:156", "207886:158-207886:166",
"207886:168-207886:171", "207889:1-207889:43", "207889:47-207889:57", "207889:60-207889:303", "207889:306-207889:442",
"207889:445", "207889:447-207889:551", "207889:553-207889:731", "207889:733-207889:907", "207889:910-207889:945",
"207898:1-207898:33", "207898:36-207898:57", "207898:60-207898:235", "207898:239-207898:257", "207898:260-207898:277",
"207905:75-207905:196", "207905:198-207905:281", "207905:284-207905:329", "207905:331-207905:402", "207905:404-207905:565",
"207905:568-207905:672", "207905:675-207905:805", "207905:807-207905:850", "207905:852-207905:861", "207905:864-207905:884",
"207905:886-207905:1180", "207905:1183-207905:1283", "207905:1285-207905:1331", "207905:1333-207905:1515", "207905:1518-207905:1734",
"207905:1737-207905:1796", "207920:84-207920:146", "207920:149-207920:241", "207920:243-207920:261", "207920:264-207920:291",
"207920:294-207920:486", "207920:489-207920:518", "207920:520-207920:598", "207920:600-207920:708", "207920:710-207920:826",
"207921:1-207921:37", "207921:40-207921:58", "207922:1-207922:69", "207922:71-207922:100", "207922:103-207922:126",
"207922:129-207922:242", "207922:274-207922:291", "207924:1-207924:52", "207924:54-207924:171", "207924:173-207924:178",
"207924:181-207924:339", "208307:2-208307:42", "208307:45", "208307:47-208307:70", "208307:72-208307:147",
"208307:150-208307:252", "208307:256-208307:259", "208307:262-208307:275", "208307:278-208307:342", "208307:345-208307:450",
"208307:453-208307:527", "208307:530-208307:583", "208307:586-208307:605", "208307:608-208307:616", "208307:618-208307:667",
"208307:670-208307:761", "208307:763-208307:798", "208307:800-208307:889", "208307:891-208307:893", "208307:896-208307:1055",
"208307:1057-208307:1205", "208307:1208-208307:1294", "208307:1297-208307:1328", "208339:77-208339:89", "208339:91-208339:122",
"208339:125-208339:208", "208339:211-208339:346", "208339:349-208339:363", "208341:1-208341:84", "208341:87-208341:117",
"208341:120-208341:513", "208341:515-208341:685", "208341:688-208341:693", "208341:695-208341:775", "208341:777-208341:824",
"208351:83-208351:97", "208351:100-208351:356", "208351:359-208351:367", "208351:369", "208352:1-208352:15",
"208352:17", "208352:19", "208353:1-208353:76", "208353:78-208353:269", "208353:271-208353:348",
"208357:1-208357:70", "208357:73-208357:507", "208390:72-208390:128", "208390:130-208390:169", "208391:52-208391:82",
"208391:84-208391:162", "208391:164-208391:216", "208391:219-208391:493", "208391:495-208391:498", "208391:500-208391:523",
"208391:526-208391:533", "208391:535-208391:588", "208391:591-208391:660", "208391:663-208391:869", "208427:49-208427:89",
"208427:92-208427:161", "208427:164", "208427:166-208427:173", "208427:175-208427:268", "208427:271-208427:312",
"208427:315", "208427:317-208427:335", "208427:337-208427:361", "208427:364-208427:402", "208427:404-208427:422",
"208427:425-208427:577", "208427:580-208427:647", "208428:1-208428:58", "208428:61-208428:68", "208428:70-208428:156",
"208428:159-208428:227", "208429:1-208429:56", "208429:59-208429:139", "208429:141-208429:159", "208429:162-208429:237",
"208429:240-208429:440", "208429:442-208429:452", "208429:455-208429:589", "208429:592-208429:712", "208429:715-208429:922",
"208487:2-208487:26", "208487:29-208487:159", "208487:161-208487:307", "208487:309-208487:459", "208487:462-208487:476",
"208487:479-208487:621", "208509:71-208509:232", "208538:2-208538:43", "208540:1-208540:26", "208540:29-208540:98",
"208541:1-208541:57", "208541:59-208541:173", "208541:175-208541:376", "208541:378-208541:413", "208551:119-208551:193",
"208551:195-208551:212", "208551:215-208551:300", "208551:303-208551:354", "208551:356-208551:554", "208551:557-208551:580",
"208686:73-208686:79", "208686:82-208686:181", "208686:183-208686:224", "208686:227-208686:243", "208686:246-208686:311",
"208686:313-208686:459" ) ),
duplicateCheckMode = cms.untracked.string('noDuplicateCheck'),
fileNames = cms.untracked.vstring('/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_5.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_50.root',
'/store/cmst3/user/cmgtools/CMG/DoubleMuParked/StoreResults-Run2012C_22Jan2013_v1_PFembedded_trans1_tau132_pthad1_30had2_30_v1-5ef1c0fd428eb740081f19333520fdc8/USER/V5_B/PAT_CMG_V5_16_0/cmgTuple_500.root')
)
| [
"[email protected]"
] | |
ef94b32dec93fe156549f2e821e7e2798f65812c | 5b0aebb53c33124b87c8655a5923858d6a2a5bc7 | /bm_preproc.py | 266dc2e049dc3c7569d58d10d38f24412cdec468 | [] | no_license | corylstewart/DNA-Class | 440e8c0304ea568347d2dad77424ee77a74f9e01 | 5706b95181ef7dd73a6a9d97cc879a50663ca60a | refs/heads/master | 2021-01-10T13:18:07.538528 | 2016-03-29T18:50:26 | 2016-03-29T18:50:26 | 55,001,838 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,110 | py | """bm_preproc.py: Boyer-Moore preprocessing."""
__author__ = "Ben Langmead"
def z_array(s):
""" Use Z algorithm (Gusfield theorem 1.4.1) to preprocess s """
assert len(s) > 1
z = [len(s)] + [0] * (len(s)-1)
# Initial comparison of s[1:] with prefix
for i in range(1, len(s)):
if s[i] == s[i-1]:
z[1] += 1
else:
break
r, l = 0, 0
if z[1] > 0:
r, l = z[1], 1
for k in range(2, len(s)):
assert z[k] == 0
if k > r:
# Case 1
for i in range(k, len(s)):
if s[i] == s[i-k]:
z[k] += 1
else:
break
r, l = k + z[k] - 1, k
else:
# Case 2
# Calculate length of beta
nbeta = r - k + 1
zkp = z[k - l]
if nbeta > zkp:
# Case 2a: zkp wins
z[k] = zkp
else:
# Case 2b: Compare characters just past r
nmatch = 0
for i in range(r+1, len(s)):
if s[i] == s[i - k]:
nmatch += 1
else:
break
l, r = k, r + nmatch
z[k] = r - k + 1
return z
def n_array(s):
""" Compile the N array (Gusfield theorem 2.2.2) from the Z array """
return z_array(s[::-1])[::-1]
def big_l_prime_array(p, n):
""" Compile L' array (Gusfield theorem 2.2.2) using p and N array.
L'[i] = largest index j less than n such that N[j] = |P[i:]| """
lp = [0] * len(p)
for j in range(len(p)-1):
i = len(p) - n[j]
if i < len(p):
lp[i] = j + 1
return lp
def big_l_array(p, lp):
""" Compile L array (Gusfield theorem 2.2.2) using p and L' array.
L[i] = largest index j less than n such that N[j] >= |P[i:]| """
l = [0] * len(p)
l[1] = lp[1]
for i in range(2, len(p)):
l[i] = max(l[i-1], lp[i])
return l
def small_l_prime_array(n):
""" Compile lp' array (Gusfield theorem 2.2.4) using N array. """
small_lp = [0] * len(n)
for i in range(len(n)):
if n[i] == i+1: # prefix matching a suffix
small_lp[len(n)-i-1] = i+1
for i in range(len(n)-2, -1, -1): # "smear" them out to the left
if small_lp[i] == 0:
small_lp[i] = small_lp[i+1]
return small_lp
def good_suffix_table(p):
""" Return tables needed to apply good suffix rule. """
n = n_array(p)
lp = big_l_prime_array(p, n)
return lp, big_l_array(p, lp), small_l_prime_array(n)
def good_suffix_mismatch(i, big_l_prime, small_l_prime):
""" Given a mismatch at offset i, and given L/L' and l' arrays,
return amount to shift as determined by good suffix rule. """
length = len(big_l_prime)
assert i < length
if i == length - 1:
return 0
i += 1 # i points to leftmost matching position of P
if big_l_prime[i] > 0:
return length - big_l_prime[i]
return length - small_l_prime[i]
def good_suffix_match(small_l_prime):
""" Given a full match of P to T, return amount to shift as
determined by good suffix rule. """
return len(small_l_prime) - small_l_prime[1]
def dense_bad_char_tab(p, amap):
""" Given pattern string and list with ordered alphabet characters, create
and return a dense bad character table. Table is indexed by offset
then by character. """
tab = []
nxt = [0] * len(amap)
for i in range(0, len(p)):
c = p[i]
assert c in amap
tab.append(nxt[:])
nxt[amap[c]] = i+1
return tab
class BoyerMoore(object):
""" Encapsulates pattern and associated Boyer-Moore preprocessing. """
def __init__(self, p, alphabet='ACGT'):
# Create map from alphabet characters to integers
self.amap = {alphabet[i]: i for i in range(len(alphabet))}
# Make bad character rule table
self.bad_char = dense_bad_char_tab(p, self.amap)
# Create good suffix rule table
_, self.big_l, self.small_l_prime = good_suffix_table(p)
def bad_character_rule(self, i, c):
""" Return # skips given by bad character rule at offset i """
assert c in self.amap
assert i < len(self.bad_char)
ci = self.amap[c]
return i - (self.bad_char[i][ci]-1)
def good_suffix_rule(self, i):
""" Given a mismatch at offset i, return amount to shift
as determined by (weak) good suffix rule. """
length = len(self.big_l)
assert i < length
if i == length - 1:
return 0
i += 1 # i points to leftmost matching position of P
if self.big_l[i] > 0:
return length - self.big_l[i]
return length - self.small_l_prime[i]
def match_skip(self):
""" Return amount to shift in case where P matches T """
return len(self.small_l_prime) - self.small_l_prime[1]
def naive_find_matches_with_counter(p, t):
matches = list()
total_comps = 0
    for i in range(len(t)-len(p)+1):  # range, not xrange: keeps the helper Python 3 compatible
matched = True
for j in range(len(p)):
total_comps += 1
if p[j] != t[i+j]:
matched = False
break
if matched:
matches.append(i)
return (total_comps, matches)
def boyer_moore_with_counter(p, p_bm, t):
""" Do Boyer-Moore matching. p=pattern, t=text,
p_bm=BoyerMoore object for p """
i = 0
total_comps = 0
while i < len(t) - len(p) + 1:
total_comps += 1
shift = 1
mismatched = False
for j in range(len(p)-1, -1, -1):
if p[j] != t[i+j]:
skip_bc = p_bm.bad_character_rule(j, t[i+j])
skip_gs = p_bm.good_suffix_rule(j)
shift = max(shift, skip_bc, skip_gs)
mismatched = True
break
if not mismatched:
skip_gs = p_bm.match_skip()
shift = max(shift, skip_gs)
i += shift
return total_comps
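# --- Illustrative usage sketch (added for this edit; not in the original file). ---
# Builds the Boyer-Moore preprocessing for a small DNA pattern and contrasts the
# number of alignments Boyer-Moore tries with the naive matcher's comparisons.
if __name__ == '__main__':
    t = 'GCTACGATCTAGAATCTA'
    p = 'TCTA'
    p_bm = BoyerMoore(p, alphabet='ACGT')
    print('Boyer-Moore alignments tried:', boyer_moore_with_counter(p, p_bm, t))
    print('Naive (comparisons, matches):', naive_find_matches_with_counter(p, t))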
| [
"[email protected]"
] | |
099667299286cf88413adc62ba733f68c1b6a527 | 55b57d64ec547869835334318f3059fbb507558c | /Fred2/Data/pssms/smm/mat/A_02_02_9.py | 7a41146d9d9f5b3170add4863afcb1b9d7b5f894 | [
"BSD-3-Clause"
] | permissive | FRED-2/Fred2 | 9845f6678d4011cb746c7a5a6f283eea68077a02 | b3e54c8c4ed12b780b61f74672e9667245a7bb78 | refs/heads/master | 2021-07-12T05:05:54.515427 | 2020-05-25T06:56:25 | 2020-05-25T06:56:25 | 16,275,425 | 42 | 35 | null | 2021-07-07T12:05:11 | 2014-01-27T10:08:11 | Python | UTF-8 | Python | false | false | 2,302 | py | A_02_02_9 = {0: {'A': -0.145, 'C': 0.221, 'E': 0.72, 'D': 0.844, 'G': 0.058, 'F': -0.922, 'I': -0.135, 'H': 0.116, 'K': -0.195, 'M': -0.461, 'L': -0.138, 'N': 0.087, 'Q': 0.011, 'P': 0.503, 'S': -0.089, 'R': 0.099, 'T': 0.161, 'W': -0.221, 'V': 0.035, 'Y': -0.547}, 1: {'A': 0.108, 'C': 0.324, 'E': 0.89, 'D': 0.324, 'G': -0.085, 'F': -0.094, 'I': -0.572, 'H': 0.05, 'K': 0.233, 'M': -1.25, 'L': -1.345, 'N': 0.41, 'Q': -0.308, 'P': 1.043, 'S': -0.004, 'R': 0.877, 'T': -0.128, 'W': -0.272, 'V': -0.341, 'Y': 0.14}, 2: {'A': -0.513, 'C': 0.144, 'E': 0.353, 'D': 0.04, 'G': 0.163, 'F': -0.354, 'I': -0.132, 'H': 0.102, 'K': 0.352, 'M': -0.561, 'L': 0.233, 'N': -0.217, 'Q': 0.135, 'P': 0.1, 'S': -0.352, 'R': 0.425, 'T': 0.128, 'W': 0.149, 'V': -0.037, 'Y': -0.157}, 3: {'A': -0.172, 'C': -0.042, 'E': -0.216, 'D': -0.315, 'G': -0.157, 'F': 0.003, 'I': 0.129, 'H': 0.033, 'K': 0.103, 'M': 0.093, 'L': 0.145, 'N': 0.118, 'Q': 0.037, 'P': -0.045, 'S': -0.121, 'R': 0.226, 'T': 0.118, 'W': 0.026, 'V': 0.092, 'Y': -0.056}, 4: {'A': 0.035, 'C': -0.054, 'E': 0.023, 'D': 0.049, 'G': 0.109, 'F': -0.272, 'I': -0.3, 'H': -0.127, 'K': 0.131, 'M': 0.092, 'L': -0.107, 'N': 0.122, 'Q': 0.034, 'P': 0.264, 'S': 0.04, 'R': 0.161, 'T': 0.195, 'W': 0.052, 'V': -0.097, 'Y': -0.351}, 5: {'A': 0.099, 'C': -0.034, 'E': 0.087, 'D': 0.139, 'G': 0.167, 'F': -0.218, 'I': -0.196, 'H': 0.144, 'K': 0.449, 'M': -0.138, 'L': -0.265, 'N': -0.078, 'Q': -0.003, 'P': 0.028, 'S': -0.151, 'R': 0.218, 'T': -0.17, 'W': 0.112, 'V': -0.145, 'Y': -0.044}, 6: {'A': -0.116, 'C': 0.037, 'E': -0.098, 'D': -0.071, 'G': 0.241, 'F': -0.355, 'I': 0.156, 'H': -0.175, 'K': 0.554, 'M': -0.063, 'L': 0.183, 'N': -0.031, 'Q': 0.062, 'P': 0.19, 'S': -0.029, 'R': 0.47, 'T': -0.083, 'W': -0.39, 'V': -0.06, 'Y': -0.422}, 7: {'A': -0.048, 'C': 0.154, 'E': -0.175, 'D': 0.432, 'G': -0.001, 'F': -0.374, 'I': 0.173, 'H': 0.007, 'K': 0.243, 'M': 0.1, 'L': -0.233, 'N': -0.014, 'Q': -0.004, 'P': -0.08, 'S': -0.086, 'R': 0.077, 'T': 0.143, 'W': -0.157, 'V': 0.264, 'Y': -0.42}, 8: {'A': -0.423, 'C': 0.65, 'E': -0.065, 'D': -0.186, 'G': -0.273, 'F': 0.009, 'I': -0.619, 'H': 0.454, 'K': 0.779, 'M': -0.252, 'L': -0.945, 'N': -0.315, 'Q': 0.288, 'P': -0.101, 'S': 0.282, 'R': 0.578, 'T': 0.148, 'W': 0.44, 'V': -1.051, 'Y': 0.602}, -1: {'con': 4.16801}} | [
"[email protected]"
] | |
3b33c6da73e70bcb25b56b4fd175de4ac366f2a8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /9S8qp4XKG2qwQMdrb_2.py | 07908c297beae33944959e2c40e6e492d0f35bf6 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | """
Write a function that returns the number of ways a person can climb **n
stairs** , where the person may only climb **1** or **2** steps at a time.
To illustrate, if **n = 4** there are **5** ways to climb:
[1, 1, 1, 1]
[2, 1, 1]
[1, 2, 1]
[1, 1, 2]
[2, 2]
### Examples
ways_to_climb(1) ➞ 1
ways_to_climb(2) ➞ 2
ways_to_climb(5) ➞ 8
### Notes
A staircase of height `0` should return `1`.
"""
def ways_to_climb(n):
    # Binet-style closed form: the answer is the (n+1)-th Fibonacci number,
    # with r the golden ratio, so no recursion or loop is needed.
    r = (1 + 5**.5) / 2
    return round((r**(n + 1) - (1 - r)**(n + 1)) / (5**.5))
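# Quick sanity checks mirroring the examples in the docstring (added; illustrative only):
assert ways_to_climb(0) == 1
assert ways_to_climb(1) == 1
assert ways_to_climb(2) == 2
assert ways_to_climb(5) == 8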
| [
"[email protected]"
] | |
9fe4cb94c81a6b0a10f86ec898adfb99833b6625 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_nicking.py | 8ade774452ec36eabf9b8b12da80103b68a5a982 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 228 | py |
from xai.brain.wordbase.verbs._nick import _NICK
# class header
class _NICKING(_NICK):
def __init__(self,):
_NICK.__init__(self)
self.name = "NICKING"
self.specie = 'verbs'
self.basic = "nick"
self.jsondata = {}
| [
"[email protected]"
] | |
44e5115d831d8f11ee4ec8b575906d3138700fbf | 348aeccddd5fdb48fb91a63d170b7f0453f70e36 | /libcloud/utils/files.py | 201e94a4e3a873553fc3a035aa2b8953785c0c0e | [
"Apache-2.0"
] | permissive | lelou6666/libcloud | 4eb08e236cb9f4b787fa73ce963347f708faf092 | bff26fe27fdd53979e32e08038ecd2fc108b6083 | refs/heads/trunk | 2021-01-14T14:02:16.661579 | 2013-10-28T11:18:08 | 2013-10-28T11:18:08 | 55,902,523 | 0 | 0 | null | 2016-04-10T14:08:20 | 2016-04-10T14:08:20 | null | UTF-8 | Python | false | false | 3,437 | py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import mimetypes
from libcloud.utils.py3 import PY3
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import next
from libcloud.utils.py3 import b
CHUNK_SIZE = 8096
if PY3:
from io import FileIO as file
def read_in_chunks(iterator, chunk_size=None, fill_size=False):
"""
Return a generator which yields data in chunks.
:type iterator: :class:`object` which implements iterator interface.
:param response: An object which implements an iterator interface
or a File like object with read method.
:type chunk_size: ``int``
:param chunk_size: Optional chunk size (defaults to CHUNK_SIZE)
:type fill_size: ``bool``
:param fill_size: If True, make sure chunks are chunk_size in length
(except for last chunk).
TODO: At some point in the future we could use byte arrays here if version
>= Python 3. This should speed things up a bit and reduce memory usage.
"""
chunk_size = chunk_size or CHUNK_SIZE
if isinstance(iterator, (file, httplib.HTTPResponse)):
get_data = iterator.read
args = (chunk_size, )
else:
get_data = next
args = (iterator, )
data = b('')
empty = False
while not empty or len(data) > 0:
if not empty:
try:
chunk = b(get_data(*args))
if len(chunk) > 0:
data += chunk
else:
empty = True
except StopIteration:
empty = True
if len(data) == 0:
            return  # PEP 479: raising StopIteration inside a generator is an error on Python 3.7+
if fill_size:
if empty or len(data) >= chunk_size:
yield data[:chunk_size]
data = data[chunk_size:]
else:
yield data
data = b('')
def exhaust_iterator(iterator):
"""
Exhaust an iterator and return all data returned by it.
:type iterator: :class:`object` which implements iterator interface.
:param response: An object which implements an iterator interface
or a File like object with read method.
:rtype ``str``
:return Data returned by the iterator.
"""
data = b('')
try:
chunk = b(next(iterator))
except StopIteration:
chunk = b('')
while len(chunk) > 0:
data += chunk
try:
chunk = b(next(iterator))
except StopIteration:
chunk = b('')
return data
def guess_file_mime_type(file_path):
filename = os.path.basename(file_path)
(mimetype, encoding) = mimetypes.guess_type(filename)
return mimetype, encoding
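# --- Illustrative usage (added; not part of the original module) ---
# read_in_chunks accepts a file-like object with read() or any plain iterator;
# exhaust_iterator drains such a generator back into one byte string, e.g.:
#
#   chunks = read_in_chunks(iter([b('abc'), b('def')]), chunk_size=2, fill_size=True)
#   assert exhaust_iterator(chunks) == b('abcdef')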
| [
"[email protected]"
] | |
cead28e09d8898e94fd635d1ede4ab5cabf171fe | 16b77438b7a7923a391a12f1f4bc12b49429bb73 | /src/PIPE/PIPE.py | afa369355271987d911ce5454c61b803916fa8aa | [] | no_license | OpenJ92/zebra | eb582c36fd7110ccf5866eb34418ff9e725efd5d | 2d3d3d42bb0461901f2418069a55e47cf8450c50 | refs/heads/master | 2020-11-29T14:29:37.279589 | 2020-01-18T19:54:07 | 2020-01-18T19:54:07 | 230,138,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | from src.NODE.NODE import NODE
class PIPE(object):
def __init__(self, form):
        self._name, self._kwargs = *form.keys(), *form.values()  # assumes form is a single {name: kwargs} mapping
        self.__gen_nodes__()
self._transformed = self.__execute__({'Data1':1, 'Data2':1})
def __gen_nodes__(self):
self._nodes = [NODE(kw) for kw in self._kwargs]
self._nodes = {f"{self._name}_{node._name}": node \
for node in self._nodes}
def __execute__(self, Xs):
node = self._nodes[f"{self._name}_HEAD"]
while True:
print(Xs, node._name)
Xs = { \
name: \
(node._map._apply_(data) if name in node._on else data)\
for name, data in Xs.items() \
}
if "TAIL" in node._name:
return Xs
node = self._nodes[f"{self._name}_{next(node)}"]
        return Xs  # unreachable: the loop above always returns via the TAIL node
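# --- Illustrative usage sketch (added; the node specs below are hypothetical,
# since the NODE format is defined in src/NODE/NODE.py, which is not shown here) ---
#   pipe = PIPE({'MyPipe': [head_spec, transform_spec, tail_spec]})
#   pipe._transformed  # the data after flowing HEAD -> ... -> TAIL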
| [
"[email protected]"
] | |
19b365204ddcf74e34ab42a5f2b0d756622c9ad5 | ca55dcaa64ea9db4068e13091321cfebecc0ff41 | /codeUp/codeUpBasic/1990.py | bca5b69987f830843fdbdeecd27fbb8549319697 | [] | no_license | gomtinQQ/algorithm-python | 8fb8343594b945099ae2a4dfa794ecb47e54ab0b | 751562922b66e335f621d366bb73dacdc7125140 | refs/heads/master | 2022-12-07T23:05:44.535593 | 2020-08-21T12:29:58 | 2020-08-21T12:29:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | '''
1990 : Determine whether a number is a multiple of 3
Given a natural number n, determine whether or not it is a multiple of 3.
Print 1 if it is a multiple of 3, and 0 otherwise.
'''
n = int(input())
if(n%3==0):
print(1)
else:
print(0) | [
"[email protected]"
] | |
0f035ba1c524afe06432726820c34655340ac8c6 | 82fce9aae9e855a73f4e92d750e6a8df2ef877a5 | /Lab/venv/lib/python3.8/site-packages/OpenGL/raw/GL/ARB/texture_storage_multisample.py | fdcdbc160823f7a5f0c538918cf1a7c652b4e9a0 | [] | no_license | BartoszRudnik/GK | 1294f7708902e867dacd7da591b9f2e741bfe9e5 | 6dc09184a3af07143b9729e42a6f62f13da50128 | refs/heads/main | 2023-02-20T19:02:12.408974 | 2021-01-22T10:51:14 | 2021-01-22T10:51:14 | 307,847,589 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 879 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p
# End users want this...
from OpenGL.raw.GL import _errors
# Code generation uses this
from OpenGL.raw.GL import _types as _cs
_EXTENSION_NAME = 'GL_ARB_texture_storage_multisample'
def _f(function):
return _p.createFunction(function, _p.PLATFORM.GL, 'GL_ARB_texture_storage_multisample',
error_checker=_errors._error_checker)
@_f
@_p.types(None, _cs.GLenum, _cs.GLsizei, _cs.GLenum, _cs.GLsizei, _cs.GLsizei, _cs.GLboolean)
def glTexStorage2DMultisample(target, samples, internalformat, width, height, fixedsamplelocations): pass
@_f
@_p.types(None, _cs.GLenum, _cs.GLsizei, _cs.GLenum, _cs.GLsizei, _cs.GLsizei, _cs.GLsizei, _cs.GLboolean)
def glTexStorage3DMultisample(target,samples,internalformat,width,height,depth,fixedsamplelocations):pass
| [
"[email protected]"
] | |
8a6874d0099dce3f2d73698422596393937926c4 | 1422a57e98aba02321b772d72f8f0ada6d8b8cba | /mm/models/shared/augmentation.py | 91ccf3fae4c30c7c4b6af2cc19bd690100302532 | [
"MIT"
] | permissive | JonasRSV/Friday | e1908a411aa133bc5bd2f383b0a995f7e028092d | f959eff95ba7b11525f97099c8f5ea0e325face7 | refs/heads/main | 2023-05-15T03:33:21.542621 | 2021-06-12T10:34:50 | 2021-06-12T10:34:50 | 315,309,991 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 604 | py | from typing import List
import tensorflow as tf
import numpy as np
import models.shared.augmentations as a
import random
def create_audio_augmentations(aug: List[a.Augmentation], p: np.ndarray):
if len(aug) != len(p):
raise ValueError(f"Length of augmentations must match distribution {len(aug)} != {len(p)}")
def audio_augmentations(audio: np.ndarray, sample_rate: int):
for aug_to_apply, with_prob in zip(aug, p):
if np.random.rand() < with_prob:
audio = aug_to_apply.apply(audio, sample_rate)
return audio
return audio_augmentations
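# --- Illustrative usage sketch (added; Gain/TimeStretch are hypothetical
# Augmentation subclasses -- the real ones live in models.shared.augmentations) ---
#   augment = create_audio_augmentations([a.Gain(), a.TimeStretch()],
#                                        p=np.array([0.8, 0.3]))
#   augmented = augment(np.zeros(16000, dtype=np.float32), sample_rate=16000)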
| [
"[email protected]"
] | |
d613832fb1e4fbf8daf1f43cb77c47396088f146 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_181/ch4_2020_03_05_16_07_05_989464.py | dff125cf8d4a74499e2b22478368603f7e78b838 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | def classifica_idade(i):
    if i < 12:
        return 'crianca'
    elif i < 18:  # the original `18>i>12` left i == 12 unclassified, so 12 fell through to 'adulto'
        return 'adolescente'
    else:
        return 'adulto'
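# Examples (added; illustrative): classifica_idade(11) -> 'crianca',
# classifica_idade(12) -> 'adolescente', classifica_idade(20) -> 'adulto'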
| [
"[email protected]"
] | |
83b9b89602f94805f1ff6283f7237c42100ead2a | f5a7de717f41f8379ccdee7d06de838fdf1d0a0b | /soloperformance-api/apps/catalog/management/commands/exercises.py | b73d1df31fb2d914106dd6d80bd4253425dbe55c | [] | no_license | jimmy818/mexico-angular | 977e4d1d0cab2ff8c10c9892d9c72ca2f4f9ac49 | 005ed3729b807d77a8fd97a3b5469a42ceefdaad | refs/heads/main | 2023-08-10T21:37:53.614298 | 2021-05-11T19:04:29 | 2021-05-11T19:04:29 | 366,485,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py |
from django.core.management.base import BaseCommand, CommandError
from django.http import HttpRequest
import requests
import xlrd
from apps.catalog import utils
class Command(BaseCommand):
help = 'Add exercises'
def handle(self, *args, **options):
request = HttpRequest()
r = requests.get('https://d2femlmiaazi1b.cloudfront.net/media/excel/DB_Drills.xlsx')
with open('/tmp/excel.xlsx', 'wb') as f:
f.write(r.content)
path = '/tmp/excel.xlsx'
book = xlrd.open_workbook(path)
# sheets = book.sheet_names()
sheet_0 = book.sheet_by_index(0) # Open the first tab
        # fixed row range for this spreadsheet: rows 4..1011 hold the exercise data
for row_index in range(1012):
if row_index > 3:
excercice = None
for col_index in range(sheet_0.ncols):
item = sheet_0.cell(rowx=row_index,colx=col_index).value
                    if excercice is None:
excercice = item
excercice_item = utils.get_or_add_excercice(excercice)
else:
                        if item is not None and item != '':
utils.add_sub_excercice(excercice_item,sheet_0.cell(rowx=3,colx=col_index).value)
print(excercice)
print(sheet_0.cell(rowx=3,colx=col_index).value)
self.stdout.write(self.style.SUCCESS('Successfully.....')) | [
"[email protected]"
] | |
84555327ae07d2945fac7b3d7ca618e1946fb291 | e56214188faae8ebfb36a463e34fc8324935b3c2 | /intersight/models/workflow_default_value_ref.py | 18613e62146e7f7c285e489454fb63c30fab824b | [
"Apache-2.0"
] | permissive | CiscoUcs/intersight-python | 866d6c63e0cb8c33440771efd93541d679bb1ecc | a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4 | refs/heads/master | 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 | Apache-2.0 | 2020-03-02T16:19:49 | 2017-12-26T17:14:03 | Python | UTF-8 | Python | false | false | 5,734 | py | # coding: utf-8
"""
Cisco Intersight OpenAPI specification.
The Cisco Intersight OpenAPI specification.
OpenAPI spec version: 1.0.9-1461
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class WorkflowDefaultValueRef(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'object_type': 'str',
'moid': 'str',
'selector': 'str'
}
attribute_map = {
'object_type': 'ObjectType',
'moid': 'Moid',
'selector': 'Selector'
}
def __init__(self, object_type=None, moid=None, selector=None):
"""
WorkflowDefaultValueRef - a model defined in Swagger
"""
self._object_type = None
self._moid = None
self._selector = None
if object_type is not None:
self.object_type = object_type
if moid is not None:
self.moid = moid
if selector is not None:
self.selector = selector
@property
def object_type(self):
"""
Gets the object_type of this WorkflowDefaultValueRef.
The Object Type of the referenced REST resource.
:return: The object_type of this WorkflowDefaultValueRef.
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""
Sets the object_type of this WorkflowDefaultValueRef.
The Object Type of the referenced REST resource.
:param object_type: The object_type of this WorkflowDefaultValueRef.
:type: str
"""
self._object_type = object_type
@property
def moid(self):
"""
Gets the moid of this WorkflowDefaultValueRef.
The Moid of the referenced REST resource.
:return: The moid of this WorkflowDefaultValueRef.
:rtype: str
"""
return self._moid
@moid.setter
def moid(self, moid):
"""
Sets the moid of this WorkflowDefaultValueRef.
The Moid of the referenced REST resource.
:param moid: The moid of this WorkflowDefaultValueRef.
:type: str
"""
self._moid = moid
@property
def selector(self):
"""
Gets the selector of this WorkflowDefaultValueRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:return: The selector of this WorkflowDefaultValueRef.
:rtype: str
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this WorkflowDefaultValueRef.
An OData $filter expression which describes the REST resource to be referenced. This field may be set instead of 'moid' by clients. If 'moid' is set this field is ignored. If 'selector' is set and 'moid' is empty/absent from the request, Intersight will determine the Moid of the resource matching the filter expression and populate it in the MoRef that is part of the object instance being inserted/updated to fulfill the REST request. An error is returned if the filter matches zero or more than one REST resource. An example filter string is: Serial eq '3AA8B7T11'.
:param selector: The selector of this WorkflowDefaultValueRef.
:type: str
"""
self._selector = selector
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, WorkflowDefaultValueRef):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
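# --- Illustrative usage (added; the property values are made-up examples) ---
#   ref = WorkflowDefaultValueRef(object_type='workflow.TaskDefinition',
#                                 selector="Name eq 'MyTask'")
#   ref.to_dict()  # -> {'object_type': 'workflow.TaskDefinition', 'moid': None, ...}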
| [
"[email protected]"
] | |
1727d04b8a7d1014b6e1d7a1ae539f023ea9f601 | 1713334f9b68255f9adab70175c21f399d0460f3 | /python/125_Valid_Palindrome.py | 4d198f026b9d9fad4550fee87f5e98972fb8c355 | [
"MIT"
] | permissive | coy0725/leetcode | 0a798b7adafe80f726e51c06c34835c4aa51b563 | 743a0bfa22402ec39858dc9c4c7dc531f825b953 | refs/heads/master | 2020-05-21T18:25:09.683714 | 2019-05-11T13:00:40 | 2019-05-11T13:00:40 | 186,132,894 | 2 | 0 | MIT | 2019-05-11T12:55:22 | 2019-05-11T12:55:21 | null | UTF-8 | Python | false | false | 395 | py | class Solution(object):
def isPalindrome(self, s):
"""
:type s: str
:rtype: bool
"""
alnum_s = [t.lower() for t in s if t.isalnum()]
ls = len(alnum_s)
if ls <= 1:
return True
        mid = ls // 2  # floor division: range() needs an int on Python 3
for i in range(mid):
if alnum_s[i] != alnum_s[ls - 1 - i]:
return False
return True | [
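# Illustrative checks (added): only alphanumeric characters are compared,
# case-insensitively.
#   Solution().isPalindrome("A man, a plan, a canal: Panama")  # -> True
#   Solution().isPalindrome("race a car")                      # -> False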
"[email protected]"
] | |
d9e06504505b6a186387d2ff84264d0ecf0308fb | 83d657c787529f01a8ecc8a874421738a7eecec7 | /Paths/Harmonise Curve to Line.py | 753600a50daceb8ddc9121810ba918269ff339b9 | [
"Apache-2.0"
] | permissive | BurgAndOeden/Glyphs-Scripts | e31b5164b491dfe0cd2d57f6cf1422c4aadda104 | f0195d6b8f0a6c055e4e44d5ef41ba48bdd1e3a6 | refs/heads/master | 2020-09-16T08:01:06.345898 | 2019-11-24T00:15:44 | 2019-11-24T00:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,124 | py | #MenuTitle: Harmonise Curve to Line
# -*- coding: utf-8 -*-
__doc__="""
Maximises opposing handles and reduces adjacent handles of line segments.
"""
from Foundation import NSPoint
def intersectionWithNSPoints( pointA, pointB, pointC, pointD ):
"""
Returns an NSPoint of the intersection AB with CD.
Or False if there is no intersection
"""
try:
x1, y1 = pointA.x, pointA.y
x2, y2 = pointB.x, pointB.y
x3, y3 = pointC.x, pointC.y
x4, y4 = pointD.x, pointD.y
try:
slope12 = ( float(y2) - float(y1) ) / ( float(x2) - float(x1) )
except:
# division by zero if vertical
slope12 = None
try:
slope34 = ( float(y4) - float(y3) ) / ( float(x4) - float(x3) )
except:
# division by zero if vertical
slope34 = None
if slope12 == slope34:
# parallel, no intersection
return None
elif slope12 is None:
# first line is vertical
x = x1
y = slope34 * ( x - x3 ) + y3
elif slope34 is None:
# second line is vertical
x = x3
y = slope12 * ( x - x1 ) + y1
else:
# both lines have an angle
x = ( slope12 * x1 - y1 - slope34 * x3 + y3 ) / ( slope12 - slope34 )
y = slope12 * ( x - x1 ) + y1
intersectionPoint = NSPoint( x, y )
if bothPointsAreOnSameSideOfOrigin( intersectionPoint, pointB, pointA ) and bothPointsAreOnSameSideOfOrigin( intersectionPoint, pointC, pointD ):
if pointIsBetweenOtherPoints( intersectionPoint, pointB, pointA ) or pointIsBetweenOtherPoints( intersectionPoint, pointC, pointD ):
return None
return intersectionPoint
else:
return None
except Exception as e:
print str(e)
import traceback
print traceback.format_exc()
return None
def pointDistance( P1, P2 ):
"""Calculates the distance between P1 and P2."""
x1, y1 = P1.x, P1.y
x2, y2 = P2.x, P2.y
dist = ( ( float(x2) - float(x1) ) ** 2 + ( float(y2) - float(y1) ) **2 ) ** 0.5
return dist
def bezier( x1, y1, x2,y2, x3,y3, x4,y4, t ):
x = x1*(1-t)**3 + x2*3*t*(1-t)**2 + x3*3*t**2*(1-t) + x4*t**3
y = y1*(1-t)**3 + y2*3*t*(1-t)**2 + y3*3*t**2*(1-t) + y4*t**3
return x, y
def bothPointsAreOnSameSideOfOrigin( pointA, pointB, pointOrigin ):
returnValue = True
xDiff = (pointA.x-pointOrigin.x) * (pointB.x-pointOrigin.x)
yDiff = (pointA.y-pointOrigin.y) * (pointB.y-pointOrigin.y)
if xDiff <= 0.0 and yDiff <= 0.0:
returnValue = False
return returnValue
def pointIsBetweenOtherPoints( thisPoint, otherPointA, otherPointB) :
returnValue = False
xDiffAB = otherPointB.x - otherPointA.x
yDiffAB = otherPointB.y - otherPointA.y
xDiffAP = thisPoint.x - otherPointA.x
yDiffAP = thisPoint.y - otherPointA.y
xDiffFactor = divideAndTolerateZero( xDiffAP, xDiffAB )
yDiffFactor = divideAndTolerateZero( yDiffAP, yDiffAB )
if xDiffFactor:
if 0.0<=xDiffFactor<=1.0:
returnValue = True
if yDiffFactor:
if 0.0<=xDiffFactor<=1.0:
returnValue = True
return returnValue
def divideAndTolerateZero( dividend, divisor ):
if float(divisor) == 0.0:
return None
else:
return dividend/divisor
def handleLength(a,b,intersection):
return pointDistance(a,b)/pointDistance(a,intersection)
def moveHandle(a,b,intersection,bPercentage):
x = a.x + (intersection.x-a.x) * bPercentage
y = a.y + (intersection.y-a.y) * bPercentage
return NSPoint(x,y)
Font = Glyphs.font
if len(Font.selectedLayers) > 1:
selectionCounts = False
elif not Font.selectedLayers[0].selection:
selectionCounts = False
else:
selectionCounts = True
for selectedLayer in Font.selectedLayers:
selectedGlyph = selectedLayer.parent
selectedGlyph.beginUndo()
# put original state in background:
selectedLayer.contentToBackgroundCheckSelection_keepOldBackground_(False,False)
for path in selectedLayer.paths:
for n in path.nodes:
processedHandles = []
if (n.selected or not selectionCounts) and n.type == OFFCURVE:
# determine the segment:
if n.prevNode.type == OFFCURVE:
a = n.prevNode.prevNode
b = n.prevNode
c = n
d = n.nextNode
else:
a = n.prevNode
b = n
c = n.nextNode
d = n.nextNode.nextNode
if not a in processedHandles and not b in processedHandles:
# intersection of the magic triangle:
intersection = intersectionWithNSPoints( a.position, b.position, c.position, d.position )
if intersection:
# calculate percentages:
bLength = handleLength(a,b,intersection)
cLength = handleLength(d,c,intersection)
shortLength = (abs(bLength) + abs(cLength) - 1.0) - (1.0-abs(bLength))*(1.0-abs(cLength))
if d.nextNode.type == LINE and a.prevNode.type != LINE and d.connection == GSSMOOTH:
# max handle:
b.position = intersection
# reduced handle:
c.position = moveHandle(d,c,intersection,shortLength)
elif a.prevNode.type == LINE and d.nextNode.type != LINE and a.connection == GSSMOOTH:
# max handle:
c.position = intersection
# reduced handle:
b.position = moveHandle(a,b,intersection,shortLength)
# mark handles as processed:
processedHandles.append(a)
processedHandles.append(b)
selectedGlyph.endUndo()
| [
"[email protected]"
] | |
998dbc4a900cf93aa3ee0d2e520aed575aca4de5 | 02ad25c4ac78a98b5493a2aa7f744a77f381aaae | /dashboard_app/migrations/0010_auto_20201211_0846.py | 2168834a1f6db118e06a45e41521adce387ce856 | [] | no_license | cavidanhasanli/Havayi | 1f85d0d7608c964b0ddc80e3b526b32cdb81e8bf | bd30c9e3e700c7381b5961b5051cbcb398adc449 | refs/heads/main | 2023-02-03T09:25:03.866784 | 2020-12-22T18:09:07 | 2020-12-22T18:09:07 | 316,319,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # Generated by Django 3.1.3 on 2020-12-11 08:46
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0009_auto_20201211_0839'),
]
operations = [
migrations.DeleteModel(
name='CreditTypeInterest',
),
migrations.AddField(
model_name='banklist',
name='credit_type',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='dashboard_app.creditfields'),
),
migrations.AddField(
model_name='banklist',
name='interest',
field=models.FloatField(blank=True, default=0, null=True, validators=[django.core.validators.MinValueValidator(0.1), django.core.validators.MaxValueValidator(100)]),
),
]
| [
"[email protected]"
] | |
d4e96ddfa8c091f87bd220375da45cf8ce6295f4 | 679ce4b323f79b2425976201324c6c1f88b95199 | /Python/Stanley Cup/csv_parser.py | 53294c7db661e390948575da2be855cee905e598 | [] | no_license | abriggs914/Coding_Practice | ff690fb5f145a11f4da144f3882b37f473b10450 | 3afd7c59e0d90f0ef5f6203853e69f853312019b | refs/heads/master | 2023-08-31T04:04:58.048554 | 2023-08-29T13:23:29 | 2023-08-29T13:23:29 | 161,865,421 | 0 | 1 | null | 2022-10-27T08:35:29 | 2018-12-15T03:20:14 | Python | UTF-8 | Python | false | false | 7,689 | py | import csv
from utility import *
file_name = "past winners.csv"
# skip 2005 back fill
with open(file_name) as csv_file:
lines = csv.DictReader(csv_file)
data_by_year = {}
header = lines.fieldnames
print("header", header)
last = None
for i, line in enumerate(lines):
if last is not None:
if any([val is None or val == "" for val in line.values()]):
#print("missing values, check last:", last)
if line["Year"] == "2005":
continue
for last_key, curr_key in zip(last, line):
last_val = last[last_key]
curr_val = line[curr_key]
if curr_val is None or curr_val == "":
line[curr_key] = last_val
line["Winning Team"] = line["Winning Team"].split("(")[0].strip()
line["Losing Team"] = line["Losing Team"].split("(")[0].strip()
print(dict_print(line))
data_by_year[str(line["Year"])] = line
if 0 < i:
last = line
data_by_year = {k:v for k, v in data_by_year.items() if "1995" <= k}
print(dict_print(data_by_year, "data_by_year"))
data_by_team = {}
data_by_coach = {}
first_year = None
last_year = None
for key, val in data_by_year.items():
year = int(key)
if first_year is None:
first_year = year
if last_year is None or year > last_year:
last_year = year
w_team = val["Winning Team"]
l_team = val["Losing Team"]
if w_team not in data_by_team:
data_by_team[w_team] = {"WYear": [], "LYear": [], "appearances": 0}
if l_team not in data_by_team:
data_by_team[l_team] = {"WYear": [], "LYear": [], "appearances": 0}
data_by_team[w_team]["WYear"].append(key)
data_by_team[l_team]["LYear"].append(key)
data_by_team[w_team]["appearances"] += 1
data_by_team[l_team]["appearances"] += 1
data_by_team[w_team]["W% (per appearance)"] = len(data_by_team[w_team]["WYear"]) / data_by_team[w_team]["appearances"]
data_by_team[l_team]["W% (per appearance)"] = len(data_by_team[l_team]["WYear"]) / data_by_team[l_team]["appearances"]
data_by_team[l_team]["L% (per appearance)"] = len(data_by_team[l_team]["LYear"]) / data_by_team[l_team]["appearances"]
data_by_team[w_team]["L% (per appearance)"] = len(data_by_team[w_team]["LYear"]) / data_by_team[w_team]["appearances"]
w_coach = val["WCoach"]
l_coach = val["LCoach"]
if w_coach not in data_by_coach:
data_by_coach[w_coach] = {"WYear": [], "LYear": [], "appearances": 0}
if l_coach not in data_by_coach:
data_by_coach[l_coach] = {"WYear": [], "LYear": [], "appearances": 0}
data_by_coach[w_coach]["WYear"].append(key)
data_by_coach[l_coach]["LYear"].append(key)
data_by_coach[w_coach]["appearances"] += 1
data_by_coach[l_coach]["appearances"] += 1
data_by_coach[w_coach]["W% (per appearance)"] = percent(len(data_by_coach[w_coach]["WYear"]) / data_by_coach[w_coach]["appearances"])
data_by_coach[l_coach]["W% (per appearance)"] = percent(len(data_by_coach[l_coach]["WYear"]) / data_by_coach[l_coach]["appearances"])
data_by_coach[l_coach]["L% (per appearance)"] = percent(len(data_by_coach[l_coach]["LYear"]) / data_by_coach[l_coach]["appearances"])
data_by_coach[w_coach]["L% (per appearance)"] = percent(len(data_by_coach[w_coach]["LYear"]) / data_by_coach[w_coach]["appearances"])
teams_list = list(data_by_team.keys())
teams_list.sort()
for team in data_by_team:
w_list = data_by_team[team]["WYear"]
l_list = data_by_team[team]["LYear"]
data_by_team[team]["Appearance % ({} to {})".format(first_year, last_year)] = percent((len(w_list) + len(l_list)) / (last_year - first_year))
data_by_team[team]["Appearance W% ({} to {})".format(first_year, last_year)] = percent(len(w_list) / (last_year - first_year))
data_by_team[team]["Appearance L% ({} to {})".format(first_year, last_year)] = percent(len(l_list) / (last_year - first_year))
#data_by_team[team]["won_against"] = []
#data_by_team[team]["lost_against"] = []
greatest_rival = None
most_lost_to = None
most_won_against = None
for team_b in teams_list:
# if team != team_b:
if team_b not in data_by_team[team]:
data_by_team[team][team_b] = {"won_against": [], "lost_against": []}
for year in data_by_team[team]["WYear"]:
if data_by_year[year]["Losing Team"] == team_b:
data_by_team[team][team_b]["won_against"].append(year)
for year in data_by_team[team]["LYear"]:
if data_by_year[year]["Winning Team"] == team_b:
data_by_team[team][team_b]["lost_against"].append(year)
if greatest_rival is None:
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["won_against"]) + len(data_by_team[team][team_b]["lost_against"]) > len(greatest_rival[1]):
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["won_against"]) + len(data_by_team[team][team_b]["lost_against"]) == len(greatest_rival[1]):
if data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"]:
if max(data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"]) > max(greatest_rival[1]):
greatest_rival = (team_b, data_by_team[team][team_b]["won_against"] + data_by_team[team][team_b]["lost_against"])
if most_lost_to is None:
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["lost_against"]) > len(most_lost_to[1]):
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
elif len(data_by_team[team][team_b]["lost_against"]) == len(most_lost_to[1]):
if data_by_team[team][team_b]["lost_against"]:
if max(data_by_team[team][team_b]["lost_against"]) > max(most_lost_to[1]):
most_lost_to = (team_b, data_by_team[team][team_b]["lost_against"])
if most_won_against is None:
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
elif len(data_by_team[team][team_b]["won_against"]) > len(most_won_against[1]):
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
elif len(data_by_team[team][team_b]["won_against"]) == len(most_won_against[1]):
if data_by_team[team][team_b]["won_against"]:
if max(data_by_team[team][team_b]["won_against"]) > max(most_won_against[1]):
most_won_against = (team_b, data_by_team[team][team_b]["won_against"])
data_by_team[team]["greatest_rival"] = greatest_rival
if most_lost_to[1]:
data_by_team[team]["most_lost_to"] = most_lost_to
if most_won_against[1]:
data_by_team[team]["most_won_against"] = most_won_against
print(dict_print(data_by_team, "Data By Team"))
print("parsed teams:\n", "\n".join(teams_list))
for coach in data_by_coach:
w_list = data_by_coach[coach]["WYear"]
l_list = data_by_coach[coach]["LYear"]
data_by_coach[coach]["Appearance % ({} to {})".format(first_year, last_year)] = (len(w_list) + len(l_list)) / (last_year - first_year)
data_by_coach[coach]["Appearance W% ({} to {})".format(first_year, last_year)] = len(w_list) / (last_year - first_year)
data_by_coach[coach]["Appearance L% ({} to {})".format(first_year, last_year)] = len(l_list) / (last_year - first_year)
print(dict_print(data_by_coach, "Data By Coach"))
coaches_list = list(data_by_coach.keys())
coaches_list.sort()
print("parsed coaches:\n", "\n".join(coaches_list))
# count # time each team / coach has won.
# count # time each team met and won/lost against each other team.
# count # GWG -> period, timeOfPeriod
| [
"[email protected]"
] | |
ee824e6b9b7691a064d6ec0a0a4aca640c8d4611 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/preclud.py | 92806b29aea1beb79e849a1ee0a0da996f253cc9 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,193 | py | ii = [('BentJDO2.py', 2), ('CookGHP3.py', 2), ('LyelCPG2.py', 1), ('MarrFDI.py', 1), ('RogePAV2.py', 5), ('CoolWHM2.py', 1), ('GodwWSL2.py', 6), ('RogePAV.py', 6), ('WilbRLW4.py', 1), ('ProuWCM.py', 2), ('AubePRP2.py', 10), ('CookGHP.py', 4), ('MartHSI2.py', 5), ('WilkJMC3.py', 1), ('AubePRP.py', 16), ('ChalTPW2.py', 3), ('AdamWEP.py', 2), ('WilbRLW2.py', 2), ('ClarGE2.py', 4), ('CoopJBT2.py', 1), ('AinsWRR3.py', 2), ('CookGHP2.py', 3), ('KiddJAE.py', 1), ('AdamHMM.py', 3), ('ClarGE.py', 11), ('LyelCPG.py', 4), ('DibdTRL2.py', 1), ('AinsWRR.py', 1), ('WadeJEB.py', 7), ('TalfTIT.py', 2), ('CoopJBT.py', 2), ('KirbWPW2.py', 3), ('SoutRD2.py', 2), ('BackGNE.py', 1), ('MedwTAI2.py', 4), ('WheeJPT.py', 6), ('MereHHB3.py', 1), ('MereHHB.py', 1), ('WilkJMC.py', 3), ('MartHRW.py', 2), ('FitzRNS4.py', 1), ('CoolWHM3.py', 1), ('BentJRP.py', 6), ('StorJCC.py', 8), ('MackCNH2.py', 1), ('BellCHM.py', 1), ('JacoWHI2.py', 1), ('WilbRLW3.py', 1), ('ClarGE3.py', 4), ('MartHRW2.py', 1), ('DibdTRL.py', 1), ('FitzRNS2.py', 3), ('HogaGMM2.py', 2), ('MartHSI.py', 6), ('EvarJSP.py', 6), ('DwigTHH.py', 5), ('LyelCPG3.py', 2), ('TaylIF.py', 4), ('WordWYR.py', 2), ('KeigTSS.py', 1), ('KirbWPW.py', 1)] | [
"[email protected]"
] | |
4eb48a87e664b4cabd5416d2d6729ed9a88b43a1 | 49cd9ba075ed2ab6b267f6e012bfb03267b7bc08 | /project_42_formsModelpagination/app42/forms.py | 99db23b3c75ea231d95bd12b4e9224ed18e651db | [] | no_license | Satputev/DjangoApps | 4d47a76f20815b2b1313e8b3e3c61b5406f5da60 | c6fb5e9fa131f07d1f5920e98699f9daaa49d424 | refs/heads/master | 2023-02-14T00:42:36.037749 | 2020-12-24T07:39:54 | 2020-12-24T07:39:54 | 323,857,826 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py | from django import forms
from app42.models import ProductsModel
from django.forms import ValidationError
class ProductForm(forms.ModelForm):
class Meta:
model=ProductsModel
fields='__all__'
exclude=('pid',)
labels={'pname':'Product Name','pprice':'Product Price','pimg':'Product Image'}
def clean_pprice(self):
price=self.cleaned_data['pprice']
if price < 1:
raise ValidationError('price should be greater than "0"')
else:
return price
| [
"[email protected]"
] | |
694b8b138f3b4862d4b35953cdb3675a91e2a179 | fd25231975acd147e04dc3ed3627c92cb1a4f86c | /FlaskAPI/vir_env/lib/python3.7/site-packages/scipy/spatial/tests/test_distance.py | c0b831a2879fa2a21e753350d7b7edefe48591cf | [] | no_license | sumitkutty/Flight-Price-Prediction | 832a2802a3367e655b46d3b44f073d917abd2320 | d974a8b75fbcbfa42f11703602af3e45a3f08b3c | refs/heads/master | 2022-12-25T07:13:06.375888 | 2020-10-08T18:46:44 | 2020-10-08T18:46:44 | 302,366,725 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:a99a0a8e8696f85040b18a7b95996060265dec4c0607ab9bc90551e2f2dc9bd2
size 81424
| [
"[email protected]"
] | |
035f453b189a37c9677088804e6c18447aabdbbe | 75dcb56e318688499bdab789262839e7f58bd4f6 | /_algorithms_challenges/leetcode/LeetCode/733 Flood Fill.py | 4350e4e56af74a61b1f948707760e1b580de0573 | [] | no_license | syurskyi/Algorithms_and_Data_Structure | 9a1f358577e51e89c862d0f93f373b7f20ddd261 | 929dde1723fb2f54870c8a9badc80fc23e8400d3 | refs/heads/master | 2023-02-22T17:55:55.453535 | 2022-12-23T03:15:00 | 2022-12-23T03:15:00 | 226,243,987 | 4 | 1 | null | 2023-02-07T21:01:45 | 2019-12-06T04:14:10 | Jupyter Notebook | UTF-8 | Python | false | false | 2,124 | py | #!/usr/bin/python3
"""
An image is represented by a 2-D array of integers, each integer representing
the pixel value of the image (from 0 to 65535).
Given a coordinate (sr, sc) representing the starting pixel (row and column) of
the flood fill, and a pixel value newColor, "flood fill" the image.
To perform a "flood fill", consider the starting pixel, plus any pixels
connected 4-directionally to the starting pixel of the same color as the
starting pixel, plus any pixels connected 4-directionally to those pixels (also
with the same color as the starting pixel), and so on. Replace the color of all
of the aforementioned pixels with the newColor.
At the end, return the modified image.
Example 1:
Input:
image = [[1,1,1],[1,1,0],[1,0,1]]
sr = 1, sc = 1, newColor = 2
Output: [[2,2,2],[2,2,0],[2,0,1]]
Explanation:
From the center of the image (with position (sr, sc) = (1, 1)), all pixels
connected
by a path of the same color as the starting pixel are colored with the new
color.
Note the bottom corner is not colored 2, because it is not 4-directionally
connected
to the starting pixel.
Note:
The length of image and image[0] will be in the range [1, 50].
The given starting pixel will satisfy 0 <= sr < image.length and 0 <= sc <
image[0].length.
The value of each color in image[i][j] and newColor will be an integer in
[0, 65535].
"""
from typing import List
dirs = ((-1, 0), (1, 0), (0, -1), (0, 1))
class Solution:
def floodFill(self, image: List[List[int]], sr: int, sc: int, newColor: int) -> List[List[int]]:
"""
dfs fill
mistake: corner case image == new color
"""
cur_color = image[sr][sc]
if cur_color == newColor:
return image
self.dfs(image, sr, sc, cur_color, newColor)
return image
def dfs(self, image, i, j, cur_color, new_color):
image[i][j] = new_color
m, n = len(image), len(image[0])
for di, dj in dirs:
I = i + di
J = j + dj
if 0 <= I < m and 0 <= J < n and image[I][J] == cur_color:
self.dfs(image, I, J, cur_color, new_color)
| [
"[email protected]"
] | |
7d375196ec6a89c43b9391ff60129464324ce322 | f4fdb0c1213bbb403b87c2dbbde390918ac08861 | /convert_uk_decl_num3.py | accb16c1dd9181350a97f4be6023784d4fd9b64a | [] | no_license | benwing2/RuNounChanges | 0d5076e576237f10b50049ed52b91f96c95cca95 | 048dfed5abe09b8d5629c5772292027ce0a170f2 | refs/heads/master | 2023-09-03T22:48:06.972127 | 2023-09-03T06:27:56 | 2023-09-03T06:27:56 | 41,480,942 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,246 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pywikibot, re, sys, argparse
import blib
from blib import getparam, rmparam, tname, pname, msg, errandmsg, site
def process_text_on_page(index, pagetitle, text):
global args
def pagemsg(txt):
msg("Page %s %s: %s" % (index, pagetitle, txt))
notes = []
parsed = blib.parse_text(text)
for t in parsed.filter_templates():
tn = tname(t)
origt = str(t)
def getp(param):
return getparam(t, param)
if tn == "uk-decl-num3":
def clean_part(part):
return blib.remove_links(part).replace(" ", "").strip()
acc = clean_part(getp("4"))
if "," in acc:
nom = clean_part(getp("1"))
gen = clean_part(getp("2"))
dat = clean_part(getp("3"))
ins = clean_part(getp("5"))
loc = clean_part(getp("6"))
acc_parts = acc.split(",")
if len(acc_parts) == 2:
acc_in, acc_an = acc_parts
for param in t.params:
pn = pname(param)
pv = str(param.value)
if not re.search("^[1-6]$", pn):
pagemsg("WARNING: Unrecognized param: %s=%s" % (pn, pv))
return
del t.params[:]
blib.set_template_name(t, "uk-adecl-manual")
t.add("special", "plonly\n", preserve_spacing=False)
t.add("nom_p", nom + "\n", preserve_spacing=False)
t.add("gen_p", gen + "\n", preserve_spacing=False)
t.add("dat_p", dat + "\n", preserve_spacing=False)
t.add("acc_p_in", acc_in + "\n", preserve_spacing=False)
t.add("acc_p_an", "%s,%s\n" % (acc_in, acc_an), preserve_spacing=False)
t.add("ins_p", ins + "\n", preserve_spacing=False)
t.add("loc_p", loc + "\n", preserve_spacing=False)
notes.append("replace {{uk-decl-num3}} with {{uk-adecl-manual}}")
pagemsg("Replaced %s with %s" % (origt, str(t)))
return str(parsed), notes
parser = blib.create_argparser("Convert {{uk-decl-num3}} to {{uk-adecl-manual}}", include_pagefile=True, include_stdin=True)
args = parser.parse_args()
start, end = blib.parse_start_end(args.start, args.end)
blib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True,
default_refs=["Template:uk-decl-num3"])
| [
"[email protected]"
] | |
fbfa4af6739e251fef1d94b0ce852a6cb2c6cca3 | c1b8ff60ed4d8c70e703f71b7c96a649a75c0cec | /ostPython4/context_mgr.py | 5d67ab14436a6f258a36aef585b8624eba812c9d | [] | no_license | deepbsd/OST_Python | 836d4fae3d98661a60334f66af5ba3255a0cda5c | b32f83aa1b705a5ad384b73c618f04f7d2622753 | refs/heads/master | 2023-02-14T17:17:28.186060 | 2023-01-31T02:09:05 | 2023-01-31T02:09:05 | 49,534,454 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | #!/usr/bin/env python3
#
#
# context_mgr.py
#
# Lesson 14: Context Managers
#
# by David S. Jackson
# 8/17/15
#
# OST Python4: Advanced Python
# for Pat Barton, Instructor
#
"""
Project:
Write a context manager class that suppresses any ValueError
exceptions that occur in the controlled suite, but allows any
other exception to be raised in the surrounding context.
"""
class ctx_mgr:
def __init__(self, raising=True):
self.raising = raising
def __enter__(self):
cm = object()
return cm
def __exit__(self, exc_type, exc_val, exc_tb):
"Self.raising can be overridden, so I reset it excplicitly."
self.raising = True
if exc_type == ValueError:
return self.raising
elif exc_type:
raise
if __name__ == "__main__":
with ctx_mgr(raising=True) as cm:
print('To create ValueError, enter a float or string.')
num = int(input("Enter a number: "))
print('To create an IndexError, enter an int greater than 4.')
myindex = int(input('lst1 = [1,2,3,4,5]. What index is number 4? '))
lst1 = [1,2,3,4,5]
print("The value you selected is: ", lst1[myindex])
print("Divide by zero!", 3/0)
| [
"[email protected]"
] | |
47090964e324910f247fd920b15518fdb4231728 | f4c0172e70ca5ffbe01695245e82a28291f88d04 | /v0.5.3-all/StudyTensroFlow/keras/tests/keras/engine/test_training.py | 6854ffaec08ce2a5aade75e5566d2eb9ec2b49fb | [
"MIT"
] | permissive | huangxinkid/DeepLearning_Wavelet-LSTM | a84e667d5f2db477ac5a9993d8ae329ec9fd115f | b726f99a8631fc48e6943655ace222b0f6b0290b | refs/heads/master | 2020-03-24T07:11:52.832149 | 2018-05-30T18:43:38 | 2018-05-30T18:43:38 | 142,556,218 | 0 | 1 | null | 2018-07-27T09:21:18 | 2018-07-27T09:21:18 | null | UTF-8 | Python | false | false | 43,976 | py | import pytest
import numpy as np
import pandas as pd
from numpy.testing import assert_allclose
import sys
import scipy.sparse as sparse
import keras
from keras import losses
from keras.layers import Dense, Dropout
from keras.engine.topology import Input
from keras.engine.training import Model
from keras.engine.training import _check_loss_and_target_compatibility
from keras.engine.training import _weighted_masked_objective
from keras.engine.training import _check_array_lengths
from keras.engine.training import _slice_arrays
from keras.models import Sequential
from keras import backend as K
from keras.utils import Sequence
from keras.utils.test_utils import keras_test
from keras.callbacks import LambdaCallback
class RandomSequence(Sequence):
def __init__(self, batch_size, sequence_length=12):
self.batch_size = batch_size
self.sequence_length = sequence_length
def __len__(self):
return self.sequence_length
def __getitem__(self, idx):
return [np.random.random((self.batch_size, 3)), np.random.random((self.batch_size, 3))], [
np.random.random((self.batch_size, 4)),
np.random.random((self.batch_size, 3))]
def on_epoch_end(self):
pass
@keras_test
def test_check_array_lengths():
_check_array_lengths(None, None, None)
a_np = np.random.random((4, 3, 3))
_check_array_lengths(a_np, a_np, a_np)
_check_array_lengths([a_np, a_np], [a_np, a_np], [a_np, a_np])
_check_array_lengths([None], [None], [None])
b_np = np.random.random((3, 4))
with pytest.raises(ValueError):
_check_array_lengths(a_np, None, None)
with pytest.raises(ValueError):
_check_array_lengths(a_np, a_np, None)
with pytest.raises(ValueError):
_check_array_lengths([a_np], [None], None)
with pytest.raises(ValueError):
_check_array_lengths([a_np], [b_np], None)
with pytest.raises(ValueError):
_check_array_lengths([a_np], None, [b_np])
@keras_test
def test_slice_arrays():
input_a = np.random.random((10, 3))
_slice_arrays(None)
_slice_arrays(input_a, 0)
_slice_arrays(input_a, 0, 1)
_slice_arrays(input_a, stop=2)
input_a = [None, [1, 1], None, [1, 1]]
_slice_arrays(input_a, 0)
_slice_arrays(input_a, 0, 1)
_slice_arrays(input_a, stop=2)
input_a = [None]
_slice_arrays(input_a, 0)
_slice_arrays(input_a, 0, 1)
_slice_arrays(input_a, stop=2)
input_a = None
_slice_arrays(input_a, 0)
_slice_arrays(input_a, 0, 1)
_slice_arrays(input_a, stop=2)
@keras_test
def test_weighted_masked_objective():
a = Input(shape=(3,), name='input_a')
# weighted_masked_objective
def mask_dummy(y_true=None, y_pred=None, weight=None):
return K.placeholder(y_true.shape)
weighted_function = _weighted_masked_objective(losses.categorical_crossentropy)
weighted_function(a, a, None)
@keras_test
def test_model_methods():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
# training/testing doesn't work before compiling.
with pytest.raises(RuntimeError):
model.train_on_batch([input_a_np, input_b_np], [output_a_np, output_b_np])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.train_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# test fit
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np], epochs=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=4)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
epochs=1, batch_size=4)
# test validation_split
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5)
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5)
# test validation data
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
epochs=1, batch_size=4,
validation_data=([input_a_np, input_b_np], [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np],
epochs=1, batch_size=4, validation_split=0.5,
validation_data=({'input_a': input_a_np, 'input_b': input_b_np}, [output_a_np, output_b_np]))
out = model.fit({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np},
epochs=1, batch_size=4, validation_split=0.5,
validation_data=(
{'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np}))
# test_on_batch
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_a': input_a_np, 'input_b': input_b_np},
{'dense_1': output_a_np, 'dropout': output_b_np})
# predict_on_batch
out = model.predict_on_batch([input_a_np, input_b_np])
out = model.predict_on_batch({'input_a': input_a_np, 'input_b': input_b_np})
# predict, evaluate
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.predict([input_a_np, input_b_np], batch_size=4)
# with sample_weight
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
sample_weight = [None, np.random.random((10,))]
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=sample_weight)
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=sample_weight)
# test accuracy metric
model.compile(optimizer, loss, metrics=['acc'],
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 5
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 5
# this should also work
model.compile(optimizer, loss, metrics={'dense_1': 'acc'},
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
# and this as well
model.compile(optimizer, loss, metrics={'dense_1': ['acc']},
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == 4
# test starting from non-zero initial epoch
trained_epochs = []
trained_batches = []
# define tracer callback
def on_epoch_begin(epoch, logs):
trained_epochs.append(epoch)
def on_batch_begin(batch, logs):
trained_batches.append(batch)
tracker_cb = LambdaCallback(on_epoch_begin=on_epoch_begin,
on_batch_begin=on_batch_begin)
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np], epochs=5, batch_size=4,
initial_epoch=2, callbacks=[tracker_cb])
assert trained_epochs == [2, 3, 4]
# test starting from non-zero initial epoch for generator too
trained_epochs = []
def gen_data(batch_sz):
while True:
yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
[np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
out = model.fit_generator(gen_data(4), steps_per_epoch=3, epochs=5,
initial_epoch=2, callbacks=[tracker_cb])
assert trained_epochs == [2, 3, 4]
# test with a custom metric function
def mse(y_true, y_pred):
return K.mean(K.pow(y_true - y_pred, 2))
model.compile(optimizer, loss, metrics=[mse],
sample_weight_mode=None)
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
out_len = 1 + 2 * (1 + 1) # total loss + 2 outputs * (loss + metric)
assert len(out) == out_len
out = model.test_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
assert len(out) == out_len
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.fit([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4, epochs=1)
out = model.evaluate([input_a_np, input_b_np], [output_a_np, output_b_np], batch_size=4)
out = model.predict([input_a_np, input_b_np], batch_size=4)
# enable verbose for evaluate_generator
out = model.evaluate_generator(gen_data(4), steps=3, verbose=1)
# empty batch
with pytest.raises(ValueError):
def gen_data():
while True:
yield (np.asarray([]), np.asarray([]))
out = model.evaluate_generator(gen_data(), steps=1)
# x is not a list of numpy arrays.
with pytest.raises(ValueError):
out = model.predict([None])
# x does not match _feed_input_names.
with pytest.raises(ValueError):
out = model.predict([input_a_np, None, input_b_np])
with pytest.raises(ValueError):
out = model.predict([None, input_a_np, input_b_np])
# all input/output/weight arrays should have the same number of samples.
with pytest.raises(ValueError):
out = model.train_on_batch([input_a_np, input_b_np[:2]],
[output_a_np, output_b_np],
sample_weight=sample_weight)
with pytest.raises(ValueError):
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np[:2]],
sample_weight=sample_weight)
with pytest.raises(ValueError):
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=[sample_weight[1], sample_weight[1][:2]])
# `sample_weight` is neither a dict nor a list.
with pytest.raises(TypeError):
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=tuple(sample_weight))
# `validation_data` is neither a tuple nor a triple.
with pytest.raises(ValueError):
out = model.fit([input_a_np, input_b_np],
[output_a_np, output_b_np],
epochs=1, batch_size=4,
validation_data=([input_a_np, input_b_np],))
# `loss` does not match outputs.
with pytest.raises(ValueError):
model.compile(optimizer, loss=['mse', 'mae', 'mape'])
# `loss_weights` does not match output_names.
with pytest.raises(ValueError):
model.compile(optimizer, loss='mse', loss_weights={'lstm': 0.5})
# `loss_weights` does not match outputs.
with pytest.raises(ValueError):
model.compile(optimizer, loss='mse', loss_weights=[0.5])
# `loss_weights` is invalid type.
with pytest.raises(TypeError):
model.compile(optimizer, loss='mse', loss_weights=(0.5, 0.5))
# `sample_weight_mode` does not match output_names.
with pytest.raises(ValueError):
model.compile(optimizer, loss='mse', sample_weight_mode={'lstm': 'temporal'})
# `sample_weight_mode` does not match output_names.
with pytest.raises(ValueError):
model.compile(optimizer, loss='mse', sample_weight_mode=['temporal'])
# `sample_weight_mode` matches output_names partially.
with pytest.raises(ValueError):
model.compile(optimizer, loss='mse', sample_weight_mode={'dense_1': 'temporal'})
# `loss` does not exist.
with pytest.raises(ValueError):
model.compile(optimizer, loss=[])
model.compile(optimizer, loss=['mse', 'mae'])
model.compile(optimizer, loss='mse', loss_weights={'dense_1': 0.2, 'dropout': 0.8})
model.compile(optimizer, loss='mse', loss_weights=[0.2, 0.8])
# the rank of weight arrays should be 1.
with pytest.raises(ValueError):
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=[None, np.random.random((10, 20, 30))])
model.compile(optimizer, loss='mse', sample_weight_mode={'dense_1': None, 'dropout': 'temporal'})
model.compile(optimizer, loss='mse', sample_weight_mode=[None, 'temporal'])
# the rank of output arrays should be at least 3D.
with pytest.raises(ValueError):
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
sample_weight=sample_weight)
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
trained_epochs = []
trained_batches = []
out = model.fit_generator(generator=RandomSequence(3), steps_per_epoch=3, epochs=5,
initial_epoch=0, validation_data=RandomSequence(4),
validation_steps=3, callbacks=[tracker_cb])
assert trained_epochs == [0, 1, 2, 3, 4]
assert trained_batches == list(range(3)) * 5
# steps_per_epoch will be equal to len of sequence if it's unspecified
trained_epochs = []
trained_batches = []
out = model.fit_generator(generator=RandomSequence(3), epochs=5,
initial_epoch=0, validation_data=RandomSequence(4),
callbacks=[tracker_cb])
assert trained_epochs == [0, 1, 2, 3, 4]
assert trained_batches == list(range(12)) * 5
# fit_generator will throw an exception if steps is unspecified for regular generator
with pytest.raises(ValueError):
def gen_data():
while True:
yield (np.asarray([]), np.asarray([]))
out = model.fit_generator(generator=gen_data(), epochs=5,
initial_epoch=0, validation_data=gen_data(),
callbacks=[tracker_cb])
# Check if generator is only accessed an expected number of times
gen_counters = [0, 0]
def gen_data(i):
while True:
gen_counters[i] += 1
yield ([np.random.random((1, 3)), np.random.random((1, 3))],
[np.random.random((1, 4)), np.random.random((1, 3))])
out = model.fit_generator(generator=gen_data(0), epochs=3,
steps_per_epoch=2,
validation_data=gen_data(1),
validation_steps=1,
max_queue_size=2,
workers=2)
# Need range check here as filling of the queue depends on sleep in the enqueuers
assert 6 <= gen_counters[0] <= 8
# 12 = (epoch * workers * validation steps * max_queue_size)
assert 3 <= gen_counters[1] <= 12
gen_counters = [0]
out = model.fit_generator(generator=RandomSequence(3), epochs=3,
validation_data=gen_data(0),
validation_steps=1,
max_queue_size=2,
workers=2)
# 12 = (epoch * workers * validation steps * max_queue_size)
# Need range check here as filling of the queue depends on sleep in the enqueuers
assert 3 <= gen_counters[0] <= 12
# predict_generator output shape behavior should be consistent
def expected_shape(batch_size, n_batches):
return (batch_size * n_batches, 4), (batch_size * n_batches, 3)
# Multiple outputs and one step.
batch_size = 5
sequence_length = 1
shape_0, shape_1 = expected_shape(batch_size, sequence_length)
out = model.predict_generator(RandomSequence(batch_size,
sequence_length=sequence_length))
assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1
# Multiple outputs and multiple steps.
batch_size = 5
sequence_length = 2
shape_0, shape_1 = expected_shape(batch_size, sequence_length)
out = model.predict_generator(RandomSequence(batch_size,
sequence_length=sequence_length))
assert np.shape(out[0]) == shape_0 and np.shape(out[1]) == shape_1
# Create a model with a single output.
single_output_model = Model([a, b], a_2)
single_output_model.compile(optimizer, loss, metrics=[], sample_weight_mode=None)
# Single output and one step.
batch_size = 5
sequence_length = 1
shape_0, _ = expected_shape(batch_size, sequence_length)
out = single_output_model.predict_generator(RandomSequence(batch_size,
sequence_length=sequence_length))
assert np.shape(out) == shape_0
# Single output and multiple steps.
batch_size = 5
sequence_length = 2
shape_0, _ = expected_shape(batch_size, sequence_length)
out = single_output_model.predict_generator(RandomSequence(batch_size,
sequence_length=sequence_length))
assert np.shape(out) == shape_0
@pytest.mark.skipif(sys.version_info < (3,), reason='Cannot catch warnings in python 2')
@keras_test
def test_warnings():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None)
def gen_data(batch_sz):
while True:
yield ([np.random.random((batch_sz, 3)), np.random.random((batch_sz, 3))],
[np.random.random((batch_sz, 4)), np.random.random((batch_sz, 3))])
with pytest.warns(Warning) as w:
out = model.fit_generator(gen_data(4), steps_per_epoch=10, use_multiprocessing=True, workers=2)
warning_raised = any(['Sequence' in str(w_.message) for w_ in w])
assert warning_raised, 'No warning raised when using generator with processes.'
with pytest.warns(None) as w:
out = model.fit_generator(RandomSequence(3), steps_per_epoch=4, use_multiprocessing=True, workers=2)
assert all(['Sequence' not in str(w_.message) for w_ in w]), 'A warning was raised for Sequence.'
@keras_test
def test_sparse_inputs_targets():
test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
in1 = Input(shape=(3,))
in2 = Input(shape=(3,))
out1 = Dropout(0.5, name='dropout')(in1)
out2 = Dense(4, name='dense_1')(in2)
model = Model([in1, in2], [out1, out2])
model.predict(test_inputs, batch_size=2)
model.compile('rmsprop', 'mse')
model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5)
model.evaluate(test_inputs, test_outputs, batch_size=2)
@pytest.mark.skipif(K.backend() != 'tensorflow', reason='sparse operations supported only by TensorFlow')
@keras_test
def test_sparse_placeholder_fit():
test_inputs = [sparse.random(6, 3, density=0.25).tocsr() for _ in range(2)]
test_outputs = [sparse.random(6, i, density=0.25).tocsr() for i in range(3, 5)]
in1 = Input(shape=(3,))
in2 = Input(shape=(3,), sparse=True)
out1 = Dropout(0.5, name='dropout')(in1)
out2 = Dense(4, name='dense_1')(in2)
model = Model([in1, in2], [out1, out2])
model.predict(test_inputs, batch_size=2)
model.compile('rmsprop', 'mse')
model.fit(test_inputs, test_outputs, epochs=1, batch_size=2, validation_split=0.5)
model.evaluate(test_inputs, test_outputs, batch_size=2)
@keras_test
def test_trainable_argument():
x = np.random.random((5, 3))
y = np.random.random((5, 2))
model = Sequential()
model.add(Dense(2, input_dim=3, trainable=False))
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
assert_allclose(out, out_2)
# test with nesting
inputs = Input(shape=(3,))
outputs = model(inputs)
model = Model(inputs, outputs)
model.compile('rmsprop', 'mse')
out = model.predict(x)
model.train_on_batch(x, y)
out_2 = model.predict(x)
assert_allclose(out, out_2)
@keras_test
def test_with_list_as_targets():
model = Sequential()
model.add(Dense(1, input_dim=3, trainable=False))
model.compile('rmsprop', 'mse')
x = np.random.random((2, 3))
y = [0, 1]
model.train_on_batch(x, y)
@keras_test
def test_check_not_failing():
a = np.random.random((2, 1, 3))
_check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [a.shape])
_check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [(2, None, 3)])
@keras_test
def test_check_last_is_one():
a = np.random.random((2, 3, 1))
with pytest.raises(ValueError) as exc:
_check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [a.shape])
assert 'You are passing a target array' in str(exc)
@keras_test
def test_check_bad_shape():
a = np.random.random((2, 3, 5))
with pytest.raises(ValueError) as exc:
_check_loss_and_target_compatibility([a], [losses.categorical_crossentropy], [(2, 3, 6)])
assert 'targets to have the same shape' in str(exc)
@pytest.mark.skipif(K.backend() != 'tensorflow', reason='Requires TensorFlow backend')
@keras_test
def test_model_with_input_feed_tensor():
"""We test building a model with a TF variable as input.
We should be able to call fit, evaluate, predict,
by only passing them data for the placeholder inputs
in the model.
"""
import tensorflow as tf
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
model = Model([a, b], [a_2, b_2])
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
model.compile(optimizer, loss, metrics=['mean_squared_error'],
loss_weights=loss_weights,
sample_weight_mode=None)
# test train_on_batch
out = model.train_on_batch(input_b_np,
[output_a_np, output_b_np])
out = model.train_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.test_on_batch({'input_b': input_b_np},
[output_a_np, output_b_np])
out = model.predict_on_batch({'input_b': input_b_np})
# test fit
out = model.fit({'input_b': input_b_np},
[output_a_np, output_b_np], epochs=1, batch_size=10)
out = model.fit(input_b_np,
[output_a_np, output_b_np], epochs=1, batch_size=10)
# test evaluate
out = model.evaluate({'input_b': input_b_np},
[output_a_np, output_b_np], batch_size=10)
out = model.evaluate(input_b_np,
[output_a_np, output_b_np], batch_size=10)
# test predict
out = model.predict({'input_b': input_b_np}, batch_size=10)
out = model.predict(input_b_np, batch_size=10)
assert len(out) == 2
# Now test a model with a single input
# i.e. we don't pass any data to fit the model.
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_2 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_2)
model = Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
out = model.fit(None,
output_a_np, epochs=1, batch_size=10)
out = model.fit(None,
output_a_np, epochs=1, batch_size=10)
# test evaluate
out = model.evaluate(None,
output_a_np, batch_size=10)
out = model.evaluate(None,
output_a_np, batch_size=10)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
assert out.shape == (10 * 3, 4)
# Same, without learning phase
# i.e. we don't pass any data to fit the model.
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_2 = Dense(4, name='dense_1')(a)
model = Model(a, a_2)
model.summary()
optimizer = 'rmsprop'
loss = 'mse'
model.compile(optimizer, loss, metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None,
output_a_np)
out = model.train_on_batch(None,
output_a_np)
out = model.test_on_batch(None,
output_a_np)
out = model.predict_on_batch(None)
out = model.train_on_batch([],
output_a_np)
out = model.train_on_batch({},
output_a_np)
# test fit
out = model.fit(None,
output_a_np, epochs=1, batch_size=10)
out = model.fit(None,
output_a_np, epochs=1, batch_size=10)
# test evaluate
out = model.evaluate(None,
output_a_np, batch_size=10)
out = model.evaluate(None,
output_a_np, batch_size=10)
# test predict
out = model.predict(None, steps=3)
out = model.predict(None, steps=3)
assert out.shape == (10 * 3, 4)
@keras_test
def test_model_with_partial_loss():
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dropout': 'mse'}
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
# test train_on_batch
out = model.train_on_batch(input_a_np, output_a_np)
out = model.test_on_batch(input_a_np, output_a_np)
# fit
out = model.fit(input_a_np, [output_a_np])
# evaluate
out = model.evaluate(input_a_np, [output_a_np])
# Same without dropout.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1')(a)
a_3 = Dense(4, name='dense_2')(a_2)
model = Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = {'dense_2': 'mse'}
model.compile(optimizer, loss, metrics={'dense_1': 'mae'})
# test train_on_batch
out = model.train_on_batch(input_a_np, output_a_np)
out = model.test_on_batch(input_a_np, output_a_np)
# fit
out = model.fit(input_a_np, [output_a_np])
# evaluate
out = model.evaluate(input_a_np, [output_a_np])
@keras_test
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='cntk does not support external loss yet')
def test_model_with_external_loss():
# None loss, only regularization loss.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1',
kernel_regularizer='l1',
bias_regularizer='l2')(a)
dp = Dropout(0.5, name='dropout')
a_3 = dp(a_2)
model = Model(a, [a_2, a_3])
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
input_a_np = np.random.random((10, 3))
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# No dropout, external loss.
a = Input(shape=(3,), name='input_a')
a_2 = Dense(4, name='dense_1')(a)
a_3 = Dense(4, name='dense_2')(a)
model = Model(a, [a_2, a_3])
model.add_loss(K.mean(a_3 + a_2))
optimizer = 'rmsprop'
loss = None
model.compile(optimizer, loss, metrics=['mae'])
# test train_on_batch
out = model.train_on_batch(input_a_np, None)
out = model.test_on_batch(input_a_np, None)
# fit
out = model.fit(input_a_np, None)
# evaluate
out = model.evaluate(input_a_np, None)
# Test fit with no external data at all.
if K.backend() == 'tensorflow':
import tensorflow as tf
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_2 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_2)
model = Model(a, a_2)
model.add_loss(K.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with pytest.raises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with pytest.raises(ValueError):
out = model.fit(None, None,
epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None,
epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with pytest.raises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with pytest.raises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
assert out.shape == (10 * 3, 4)
# Test multi-output model without external data.
a = Input(tensor=tf.Variable(input_a_np, dtype=tf.float32))
a_1 = Dense(4, name='dense_1')(a)
a_2 = Dropout(0.5, name='dropout')(a_1)
model = Model(a, [a_1, a_2])
model.add_loss(K.mean(a_2))
model.compile(optimizer='rmsprop',
loss=None,
metrics=['mean_squared_error'])
# test train_on_batch
out = model.train_on_batch(None, None)
out = model.test_on_batch(None, None)
out = model.predict_on_batch(None)
# test fit
with pytest.raises(ValueError):
out = model.fit(None, None, epochs=1, batch_size=10)
out = model.fit(None, None, epochs=1, steps_per_epoch=1)
# test fit with validation data
with pytest.raises(ValueError):
out = model.fit(None, None,
epochs=1,
steps_per_epoch=None,
validation_steps=2)
out = model.fit(None, None,
epochs=1,
steps_per_epoch=2,
validation_steps=2)
# test evaluate
with pytest.raises(ValueError):
out = model.evaluate(None, None, batch_size=10)
out = model.evaluate(None, None, steps=3)
# test predict
with pytest.raises(ValueError):
out = model.predict(None, batch_size=10)
out = model.predict(None, steps=3)
assert len(out) == 2
assert out[0].shape == (10 * 3, 4)
assert out[1].shape == (10 * 3, 4)
@keras_test
def test_target_tensors():
# single-output, as list
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,), name='dense'))
input_val = np.random.random((10, 4))
target_val = np.random.random((10, 4))
target = keras.backend.variable(target_val)
model.compile(optimizer='rmsprop', loss='mse', target_tensors=[target])
model.train_on_batch(input_val, None)
# single-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense': target})
model.train_on_batch(input_val, None)
# test invalid arguments
with pytest.raises(TypeError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=set())
with pytest.raises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target, target])
with pytest.raises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense2': None})
with pytest.raises(ValueError):
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target])
model.train_on_batch(input_val, target_val)
# multi-output, as list
input_val = np.random.random((10, 4))
target_val_a = np.random.random((10, 4))
target_val_b = np.random.random((10, 4))
target_a = keras.backend.variable(target_val_a)
target_b = keras.backend.variable(target_val_b)
inputs = keras.layers.Input(shape=(4,))
output_a = keras.layers.Dense(4, name='dense_a')(inputs)
output_b = keras.layers.Dense(4, name='dense_b')(inputs)
model = keras.models.Model(inputs, [output_a, output_b])
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None)
# multi-output, as dict
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_a': target_a,
'dense_b': target_b})
model.train_on_batch(input_val, None)
# test with sample weights
model.compile(optimizer='rmsprop', loss='mse',
target_tensors=[target_a, target_b])
model.train_on_batch(input_val, None,
sample_weight={'dense_a': np.random.random((10,))})
@keras_test
def test_model_custom_target_tensors():
a = Input(shape=(3,), name='input_a')
b = Input(shape=(3,), name='input_b')
a_2 = Dense(4, name='dense_1')(a)
dp = Dropout(0.5, name='dropout')
b_2 = dp(b)
y = K.placeholder([10, 4], name='y')
y1 = K.placeholder([10, 3], name='y1')
y2 = K.placeholder([7, 5], name='y2')
model = Model([a, b], [a_2, b_2])
optimizer = 'rmsprop'
loss = 'mse'
loss_weights = [1., 0.5]
# test list of target tensors
with pytest.raises(ValueError):
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1, y2])
model.compile(optimizer, loss, metrics=[], loss_weights=loss_weights,
sample_weight_mode=None, target_tensors=[y, y1])
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
output_a_np = np.random.random((10, 4))
output_b_np = np.random.random((10, 3))
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
{y: np.random.random((10, 4)),
y1: np.random.random((10, 3))})
# test dictionary of target_tensors
with pytest.raises(ValueError):
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'does_not_exist': y2})
# test dictionary of target_tensors
model.compile(optimizer, loss,
metrics=[],
loss_weights=loss_weights,
sample_weight_mode=None,
target_tensors={'dense_1': y, 'dropout': y1})
out = model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np],
{y: np.random.random((10, 4)),
y1: np.random.random((10, 3))})
if K.backend() == 'tensorflow':
import tensorflow as tf
# test with custom TF placeholder as target
pl_target_a = tf.placeholder('float32', shape=(None, 4))
model.compile(optimizer='rmsprop', loss='mse',
target_tensors={'dense_1': pl_target_a})
model.train_on_batch([input_a_np, input_b_np],
[output_a_np, output_b_np])
@pytest.mark.skipif(sys.version_info < (3,), reason='Cannot catch warnings in python 2')
@keras_test
def test_trainable_weights_count_consistency():
"""Tests the trainable weights consistency check of Model.
This verifies that a warning is shown if model.trainable is modified
and the model is summarized/run without a new call to .compile()
Reproduce issue #8121
"""
a = Input(shape=(3,), name='input_a')
model1 = Model(inputs=a, outputs=Dense(1)(a))
model1.trainable = False
b = Input(shape=(3,), name='input_b')
y = model1(b)
model2 = Model(inputs=b, outputs=Dense(1)(y))
model2.compile(optimizer='adam', loss='mse')
model1.trainable = True
# Should warn on .summary()
with pytest.warns(UserWarning) as w:
model2.summary()
warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
assert warning_raised, 'No warning raised when trainable is modified without .compile.'
# And on .fit()
with pytest.warns(UserWarning) as w:
model2.fit(x=np.zeros((5, 3)), y=np.zeros((5, 1)))
warning_raised = any(['Discrepancy' in str(w_.message) for w_ in w])
assert warning_raised, 'No warning raised when trainable is modified without .compile.'
# And shouldn't warn if we recompile
model2.compile(optimizer='adam', loss='mse')
with pytest.warns(None) as w:
model2.summary()
assert len(w) == 0, "Warning raised even when .compile() is called after modifying .trainable"
@keras_test
def test_pandas_dataframe():
input_a = Input(shape=(3,), name='input_a')
input_b = Input(shape=(3,), name='input_b')
x = Dense(4, name='dense_1')(input_a)
y = Dense(3, name='desne_2')(input_b)
model_1 = Model(inputs=input_a, outputs=x)
model_2 = Model(inputs=[input_a, input_b], outputs=[x, y])
optimizer = 'rmsprop'
loss = 'mse'
model_1.compile(optimizer=optimizer, loss=loss)
model_2.compile(optimizer=optimizer, loss=loss)
input_a_df = pd.DataFrame(np.random.random((10, 3)))
input_b_df = pd.DataFrame(np.random.random((10, 3)))
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.predict(input_a_df)
model_2.predict([input_a_df, input_b_df])
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
model_1.predict_on_batch(input_a_df)
model_2.predict_on_batch([input_a_df, input_b_df])
model_1.predict_on_batch([input_a_df])
model_1.predict_on_batch({'input_a': input_a_df})
model_2.predict_on_batch({'input_a': input_a_df, 'input_b': input_b_df})
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.train_on_batch(input_a_df,
output_a_df)
model_2.train_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.train_on_batch([input_a_df],
[output_a_df])
model_1.train_on_batch({'input_a': input_a_df},
output_a_df)
model_2.train_on_batch({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.test_on_batch(input_a_df,
output_a_df)
model_2.test_on_batch([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.test_on_batch([input_a_df],
[output_a_df])
model_1.test_on_batch({'input_a': input_a_df},
output_a_df)
model_2.test_on_batch({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
if __name__ == '__main__':
pytest.main([__file__])
| [
"[email protected]"
] | |
0d16857d7fd0f668e17201298d880dd834ab42de | d304c27c095a7e897bb9c02e78d34bed4398c8fc | /alex/components/simulator/user_simulator/demos/ptien/ptien_metadata.py | 1d75d96f2c4f6859871f52671389df68aeecb270 | [
"Apache-2.0"
] | permissive | thanhlct/alex | 876630e7cb2a6b1affce5bb646e6bd0489305393 | 9fabefb62572e96d14654d3ec0c8861daf51ffa7 | refs/heads/master | 2020-04-05T18:29:37.300215 | 2016-05-19T08:51:21 | 2016-05-19T08:51:21 | 45,947,050 | 0 | 0 | null | 2015-11-10T23:23:27 | 2015-11-10T23:23:27 | null | UTF-8 | Python | false | false | 39,235 | py | from alex.utils.sample_distribution import sample_from_list
from alex.utils.sample_distribution import sample_a_prob
import alex.utils.matlab_functions as matlab
from infer_place_info import add_place_info
def values_generator1(goal, slot):
'''Generate all values for a slot'''
return [1,2,3]
def values_generator2(goal, slot):
return [7,8,9]
def alternative_value_fun():
'''A generator for a slot during conversation'''
a = ['next', 'prev', 'last', '1', '2', '3', '4', 'next hour']
return sample_from_list(a)
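# post_process_act below prunes redundant from_borough/to_borough informs from a
# sampled user act, so the simulated user mostly mentions a borough only when it
# is the sole piece of place information in the turn.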
def post_process_act(das):
#return das
das = das[0]
#print 'in das:', das
#import pdb
da_des = get_dialogue_act_metadata(das)
    #FILTER from/to borough out of the user act if this turn doesn't include a from/to street or stop, and also keep inform(borough) with prob. of 0.5
if 'inform' in da_des and 'from_borough' in da_des['inform']['slots'] and len(da_des['inform']['slots'])>1:
lst = matlab.subtract(['from_stop'], da_des['inform']['slots'])
prob = 0.7
if len(lst)<1:
prob=0.3
if is_only_borough(da_des):
prob = 0.0
if sample_a_prob(prob):
das.dais.remove('inform(from_borough="' + da_des['inform']['slot_value']['from_borough'] + '")')
print 'remove from_borough'
#pdb.set_trace()
if 'inform' in da_des and 'to_borough' in da_des['inform']['slots'] and len(da_des['inform']['slots'])>1:
lst = matlab.subtract(['to_stop'], da_des['inform']['slots'])
prob = 0.7#70% remove borough from inform
if len(lst)<1:#has to_stop, remove with 30%
prob=0.3
if is_only_borough(da_des):#only borough don't remove
prob = 0.0
if sample_a_prob(prob):
das.dais.remove('inform(to_borough="' + da_des['inform']['slot_value']['to_borough'] + '")')
print 'remove to_borough'
#pdb.set_trace()
return [das]
def is_only_borough(des):
if len(des['inform']['slots'])==2 and matlab.is_equal(['from_borough', 'to_borough'], des['inform']['slots']):
return True
elif len(des['inform']['slots'])==1 and ('from_borough' in des['inform']['slots'] or 'to_borough' in des['inform']['slots']):
return True
else:
return False
def post_process_final_goal(goal):
goal= add_place_info(goal)
return goal
def reward_last_turn(goal, last_da):
return -1
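# A flat -1 per turn: combined with the final-goal reward, shorter successful
# dialogues end up with a higher total reward.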
def reward_final_goal(goal, turns):
    #Successful dialogue: 20; Unsuccessful: 0
success_reward = 20
failure_reward = 0
last_offer = None
for i in range(len(turns)-1, -1, -1):
da = turns[i]['sys_da'][0]
if da.has_dat('offer'):
last_offer = da
break
if last_offer is None:
return failure_reward
reward = success_reward
last_offer = get_dialogue_act_metadata(last_offer)['offer']['slot_value']
for k, v in goal.items():
if v != get_slot_value(last_offer, k):
print 'WRONG: ', k, '~', v
reward=failure_reward
break
return reward
def get_slot_value(offer, slot):
if slot in offer.keys():
return offer[slot]
eq_slots=[('from_borough', 'from_stop', 'from_city', 'from_street'), ('to_borough', 'to_stop', 'to_city', 'to_street'),
('arrival_time', 'arrival_time_rel'), ('departure_time', 'departure_time_rel'),]
for eq in eq_slots:
if slot in eq:
break
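    # NOTE: if slot belongs to no group, 'eq' keeps whatever group the loop
    # ended on, so an unrelated equivalent value may be returned (pre-existing quirk).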
for s in eq:
if s in offer.keys():
return offer[s]
return None
def get_dialogue_act_metadata(da):
    '''Return metadata describing the given dialogue act.
    Returns:
        A dict with the slots, values, and slot-value pairs used by each act type in the given da.
    '''
d = {}
for item in da:
act = item.dat
slot = item.name
value = item.value
if act in d.keys():
d[act]['slots'].append(slot)
d[act]['values'].append(value)
d[act]['slot_value'][slot] = value
else:
d[act] = {
'slots': [slot],
'values': [value],
'slot_value': {slot:value},
}
return d
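# Example (hypothetical DA with two items): inform(from_stop="X")&inform(to_stop="Y")
# -> {'inform': {'slots': ['from_stop', 'to_stop'], 'values': ['X', 'Y'],
#                'slot_value': {'from_stop': 'X', 'to_stop': 'Y'}}}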
config = {
'user_simulator':{
'SimpleUserSimulator':{
'debug': True,
            'patience_level':8,#minimum 1; the number of times the same thing is asked repeatedly before the user gets angry and hangs up; 0 means never hang up
'patience_levels':{
4: 0.5,
7: 0.2,
3: 0.3,
#2: 0.2,
},
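            # assumed semantics: a distribution over patience thresholds, one of
            # which is sampled per dialogue (the probabilities above sum to 1.0).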
'out_of_patience_act':'hangup()',
'metadata':{
'slots': ['from_stop', 'to_stop', 'from_city', 'to_city', 'from_street', 'to_street',
'departure_time', 'departure_date', 'arrival_time', 'arrival_date',
                    'vehicle', 'arrival_time_rel', 'departure_time_rel', 'number_transfers', 'duration', 'distance',
'street', 'city', 'state',
'alternative', 'date_rel',#How to push it in to the simulator
                    'slot_fun',#only to test slots whose value lists are generated dynamically from functions
],#only for easy seeing and imagining, not being used in coding
'goals': [
{'fixed_slots':[('task','find_connection'),],
'changeable_slots':['from_stop', 'to_stop', 'from_city', 'to_city', 'from_street', 'to_street',
'departure_time', 'arrival_time', 'departure_time_rel', 'arrival_time_rel',
'vehicle',
'number_transfer', 'duration', 'distance',#users dont know these slot
],
'one_of_slot_set':[
{('from_stop', 'to_stop'):0.3,#choose only one of these set
('from_city', 'to_city'):0.2,
('from_street', 'to_street'):0.3,
('from_stop', 'to_street'):0.2,
                    },#end of the first definition of one_of_slot_set
{():0.3,
('arrival_time',):0.1,
('departure_time',):0.1,
('arrival_time_rel',):0.25,
('departure_time_rel',):0.25,
},
{():0.5,
('vehicle',):0.5,
},
],
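                # each dict in one_of_slot_set is a distribution over mutually
                # exclusive slot subsets; exactly one subset is sampled from each.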
'equivalent_slots':[#('from_stop', 'from_street', 'from_borough', 'from_city'), ('to_stop', 'to_street', 'to_borough', 'to_city'),
('from_stop', 'from_street', 'from_city'), ('to_stop', 'to_street', 'to_city'),
('arrival_time', 'arrival_time_rel'), ('departure_time', 'departure_time_rel'),
],
'sys_unaskable_slots':['number_transfer', 'duration', 'distance',],
#'default_slots_values':[('departure_time', 'as soon as possible'), ('vehicle', 'dontcare'), ('arrival_time', 'as soon as possible')],
'default_slots_values':[('departure_time', 'now'), ('vehicle', 'dontcare'), ('arrival_time', 'now')],
#'add_fixed_slot_to_goal': True,
'active_prob':1.0,#probability of observing the task being active
            'same_table_slot_keys':[],#defining when several slots are linked to one row of a table and we would like to get them linked together
'goal_post_process_fun': post_process_final_goal,#post process function to refine the sampled goal, which will be defined for specific semantic relations
'act_post_process_fun': post_process_act,#post process function to refine user act
            'goal_slot_relax_fun': None,#support function: relax the value of a slot given the current goal, e.g. a later arrival or an earlier departure; not used yet, this purpose will be pushed into the action handler
'reward_last_da_fun': reward_last_turn,
'reward_final_goal_fun': reward_final_goal,
'end_dialogue_post_process_fun': None,
            'slot_used_sequence':{#a higher level can only be used once at least one slot from the previous level has been used#TODO not used in the code yet
0:('task',),
1:('from_stop', 'from_city', 'from_street', 'to_stop', 'to_city', 'to_street'),
                #1:('from_stop', 'from_city', 'from_street', 'to_stop', 'to_city', 'to_street', 'departure_time', 'arrival_time', 'departure_time_rel', 'arrival_time_rel', 'vehicle'),
                2:('departure_time', 'arrival_time', 'departure_time_rel', 'arrival_time_rel', 'vehicle'),
                #only one slot per level needs to be informed to unlock the next level
},
},
{'fixed_slots':[('task','find_platform'),],
'changeable_slots':['street', 'city', 'state'],
'one_of_slot_set':[],
'sys_unaskable_slots':[],
'default_slots_values':[],
'active_prob':0.0,
'same_table_slot_keys': ['place'],
'goal_post_process_fun': None,
'goal_slot_relax_fun': None,
},
{'fixed_slots':[('task','weather'),],
'changeable_slots':['city', 'state'],
'one_of_slot_set':[],
'sys_unaskable_slots':[],
'default_slots_values':[],
'active_prob':0.0,
'same_table_slot_keys':['place'],
'goal_post_process_fun': None,
'goal_slot_relax_fun': None,
},
],
'slot_table_field_mapping':{'from_stop':[('stops','stop')],
'to_stop':[('stops', 'stop')],
'from_city':[('cities', 'city')],
'to_city':[('cities', 'city')],
'from_street':[('streets', 'street')],
'to_street':[('streets', 'street')],
'departure_time':[('time', 'time')],
'departure_time_rel':[('time_relative', 'relative')],
'arrival_time': [('time', 'time')],
'arrival_time_rel': [('time_relative', 'relative')],
'vehicle': [('vehicles', 'vehicle')],
'street':[('streets', 'street'), ('places', 'street')],
'city':[('cities', 'city'), ('places', 'city')],
'state':[('states', 'state'), ('places', 'state')],
                'slot_fun':[values_generator1, values_generator2]#slot whose list of values is generated dynamically from functions; each function has to return a list of values, and the list may contain only one element.
},
'same_table_slots':{'place':{'table': 'places',
'slots': ['street', 'city', 'state'],
},
                            'from_place':{'table':'places',#just for the future, when we have such data.
'slots': ['from_stop', 'from_street', 'from_city', 'from_state'],
},
'to_place':{'table':'places',
'slots': ['to_stop', 'to_street', 'to_city', 'to_state'],
}
},
            'status_included': ['correct', 'incorrect', 'unmentioned'],#'pending', 'filled', 'all'],# only for imagining
'slot_value_from':['goal', 'sys_da'],#only for imagining
'slot_from': ['sys_da', 'none', 'goal'],
'answer_types':['direct_answer', 'over_answer', 'complete_answer'],#only for easy seeing and imagining
'dialogue_act_definitions': {#dialogue acts which user simulator used for answering
'request':{
'slot_included': True,
'value_included': False,
'combineable_slots': ['duration'],#['number_transfer', 'duration', 'distance']# return confliction after request
},
'inform':{
'slot_included': True,
'value_included': True,
'slot_from': 'sys_da', #in normal case, list of slots will be informed is taken from system dialogue request act, or from goal
'value_from': 'goal', #in normal case, where to get values for selected slots
#'limited_slots': ['from_borough', 'to_borough'], #list of slot cant combine, except syste ask directly
'accept_used_slots': False,
'use_slot_sequence': False,
},
'oog':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'deny':{
'slot_included': True,
'value_included': True,
'slot_from': 'sys_da',
'value_from': 'sys_da',
'status_included': 'incorrect',
},
'repeat':{
'slot_included': False,
'value_included': False,
},
'help':{
'slot_included': False,
'value_included': False,
},
'apology':{
'slot_included': False,
'value_included': False,
},
                'confirm':{#ask a question to clarify something; can the user also make this act, and how? only at the end? the simulator always knows exactly what is going on
'slot_included': True,
'value_included': True,
'status_included': 'filled',
},
'canthearyou, notunderstood':{#only available for system, not for user
},
'affirm':{#simply YES #something interesting here, doesn't include slot/value, but slots consider from sys_da and they are correct
'slot_included': False,
'value_included': False,
'slot_from': 'sys_da',
'status_included': 'correct',
'status_in_all_slots': True,
},
'ack':{
'slot_included': False,
'value_included': False,
},
'thankyou':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'silence':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'reqalts':{
'slot_included': True,
'value_included': True,
'combineable_slots': ['alternative'],
'slot_from': 'none',
'value_from': 'function',
'value_fun': alternative_value_fun,
},
'negate':{
'slot_included': False,
'value_included': False,
'slot_from': 'sys_da',
'status_included': 'incorrect',
},
'bye':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'hello':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
#'add_to_da_prob':0.5,
},
                'restart':{#TODO how to use this action?
'slot_included': False,
'value_included': False,
},
'hangup':{
'slot_included': False,
'value_included': False,
'act_without_slot': True,
},
'help':{#How?
'slot_included': False,
'value_included': False,
},
},
'act_formats':{#not being used
'slot_value_correct':{
'slot_included': True,
'value_included': True,
'correct_slot_included': False,
'incorrect_slot_included': False,
'value_from': 'goal', #or from sys_da
}
},
'reply_system_acts':{#how to combine several act types to respon an actions,list like below is quite ok, but ???
'request':[{'return_acts':['inform'],#return_acts can contain multiple acts
'inform_answer_types':{
'direct_answer':0.7,
'over_answer':0.2,
'complete_answer':0.1,
},
'inform_overridden_properties':{
#'use_slot_sequence': True,#would raise an error when the system asks for a slot absent from the current goal
},
'active_prob':0.95,
},
{'return_acts':['silence'],
'active_prob':0.00,
},
{'return_acts':['oog'],
'active_prob':0.05,
},
],
'confirm':[{#explicit confirm
#only one action in the set, or specify the apply order explicitly and stop at the first applicable one
#'ordered_return_acts' is kept alongside 'return_acts': both are maintained, for short configs and for a clear distinction between the two cases
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':0.7, #0.5
},
'case2':{'return_acts':['affirm', 'inform'],
'active_prob':0.3,#0.5
'inform_answer_types':{
'over_answer':0.8,
'complete_answer': 0.2,
},
'inform_overridden_properties':{
'slot_from': 'none',#'none' means nowhere: don't take slots from anywhere
'accept_used_slots': True,
},
},
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.4,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny'],
'active_prob':0.2,
},
'case3':{'return_acts':['deny', 'inform'],
'active_prob':0.4,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
}#end of second priority answer
],
'active_prob':1.0
},#end of the first way of answer
],
'implconfirm':[{'active_prob': 1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':1.0,
'affirm_overridden_properties':{
'add_to_da_prob':0.5,
}
},#end of first way in the first priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
}#end of second priority answer
],
},#end of the first way of answer
],
'iconfirm':[{'active_prob': 1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':1.0,
'affirm_overridden_properties':{
'add_to_da_prob':0.5,
}
},#end of first way in the first priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
}#end of second priority answer
],
},#end of the first way of answer
],
'inform':[{'active_prob': 1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm'],
'active_prob':1.0,
'affirm_overridden_properties':{
'add_to_da_prob':0.5,
}
},#end of first way in the first priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'use_slot_sequence': True,
},
},
},#end of second priority answer
{ 'case1':{'return_acts':['bye'],
'active_prob':0.5,
'affirm_overridden_properties':{
'add_to_da_prob':1.0,
},
},#end of first way in the first priority answer
'case2':{'return_acts':['thankyou', 'hangup'],
'active_prob':0.5,
'affirm_overridden_properties':{
'add_to_da_prob':1.0,
},
},#end of second way in the first priority answer
},
],
},#end of the first way of answer
],
'select':[{'return_acts':['inform'],
'active_prob': 1.0,
},
],
'apology':[{'return_acts':[],
'active_prob':1.0,
},
],
'help':[{'return_acts':['negate'],
'active_prob':1.0,
'negate_overridden_properties':{
'act_without_slot': True,
}
},
],
'silence':[{'return_acts':['inform'],
'active_prob':1.0,
'inform_answer_types':{
'direct_answer':0.0,
'over_answer':0.9,
'complete_answer':0.1,
},
'inform_overridden_properties':{
'slot_from': 'none',
'accept_used_slots': True,
#'atleast_slots': ['task'],
},
},
],
'notunderstood':[
{ 'return_acts':['repeat'],
'active_prob': 0.4,
},
{ 'return_acts':['repeat', 'inform'],
'active_prob': 0.6,
'inform_answer_types':{
'direct_answer': 0.0,
'over_answer': 0.4,
'complete_answer':0.6,
},
'inform_overridden_properties':{
'slot_from': 'none',
'accept_used_slots': True,
},
},
],
'irepeat':[{'return_acts':['oog'],
'active_prob':1.0,
},
],
'reqmore':[{'return_acts':['negate'],
'active_prob':0.7,
'negate_overridden_properties':{
'act_without_slot': True,
}
},
{ 'return_acts':['request'],
'active_prob':0.3,
},
],
'hello':[{'return_acts':['hello'],
'active_prob':0.3,#0.1
},
{'return_acts':['hello', 'inform'],
'active_prob':0.7,#0.9
'inform_answer_types':{
'over_answer': 0.8,#0.4
'complete_answer': 0.2,#0.6
},
'inform_overridden_properties':{
'slot_from': 'none',
'atleast_slots': ['task'],
},
'hello_overridden_properties':{
'add_to_da_prob':0.5,
}
},
],
'cant_apply':[{'return_acts':['hangup'],
#'cant_apply':[{'return_acts':[],
'active_prob':1.0,
},
],
'offer':{
0:[{'active_prob':1.0,
'ordered_return_acts':[
{ 'case1':{'return_acts':['affirm', 'inform'],
'active_prob':1.0,
'all_act_valid': True,#all acts in return_acts must be applicable (new)
'affirm_overridden_properties':{
'add_to_da_prob': 0.0,
},
'inform_overridden_properties':{
'slot_from': 'goal',#take all slots from the goal as combinable
'status_included': 'unmentioned',#keep only slots which were not mentioned in this turn
#'limited_slots': [],
#NOTE should we support multiple status settings, such as unmentioned + incorrect? (that info is not saved for now)
},
},
},
{ 'case1':{'return_acts':['affirm', 'bye'],
'active_prob':0.2,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of first way in the first priority answer
'case2':{'return_acts':['affirm', 'thankyou', 'bye'],
'active_prob':0.4,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of second way in the first priority answer
'case3':{'return_acts':['affirm', 'request'],#NOTE: don't ask at the end, since the current DM answer has inform(from_stop..
'active_prob':0.2,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of third way in the first priority answer
'case4':{'return_acts':['affirm', 'reqalts'],
'active_prob':0.2,
'affirm_overridden_properties':{
'add_to_da_prob':0.0,
},
},#end of fourth way in the first priority answer
},#end of first priority answer
{ 'case1':{'return_acts':['negate', 'inform'],
'active_prob':0.7,
'inform_answer_types':{
'direct_answer':1.0,
},
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'limited_slots': [],
#'use_slot_sequence': True,
},
},
'case2':{'return_acts':['deny', 'inform'],
'active_prob':0.3,
'inform_overridden_properties':{
'slot_from': 'sys_da',
'status_included': 'incorrect',
'value_from': 'goal',
#'limited_slots': [],
#'use_slot_sequence': True,
},
},
}#end of second priority answer
],#end of the list of ordered answers
},#end of first way of answer
],
1:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
2:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
},
'offer_old_unconditional':{
0:[{'return_acts':['bye'],#definition for goal_id=0
'active_prob':0.2,
},
{'return_acts':['request'],
'active_prob':0.2,
},
{'return_acts':['reqalts'],
'active_prob':0.2,
},
{'return_acts':['thankyou'],
'active_prob':0.4,
},
],
1:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
2:[{'return_acts':['bye'],
'active_prob':0.5,
},
{'return_acts':['thankyou'],
'active_prob':0.5,
},
],
},
'bye':[{'return_acts':['hangup'],
'active_prob':1.0,
}
],
},
'data_observation_probability':{
'time_relative':{
('now',):1.0,#each key is a row in the table; if the table has only one field, the tuple needs a trailing comma
},
'time_relative_full_thanh':{
('as soon as possible',):0.2,#each key is a row in the table; if the table has only one field, the tuple needs a trailing comma
('next hour',):0.1,
('morning',):0.1,
('noon',):0.1,
('afternoon',):0.1,
('night',):0.1,
('midnight',):0.05,
('early morning',):0.05,
('today',):0.1,
('tomorrow',):0.05,
('the day after tomorrow',):0.05,
},
},
},#end of metadata
},#end of SimpleUserSimulator
},#end of user_simulator
}#end of config
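# --- Illustrative sketch (not part of the original config file) ---
# A minimal example, under stated assumptions, of how the 'reply_system_acts'
# policy above might be consumed: for a given system act, sample one candidate
# answer definition weighted by its 'active_prob'. The helper name
# `sample_reply` and the `metadata` reference below are hypothetical; entries
# keyed by goal id (e.g. 'offer') or driven by 'ordered_return_acts' would
# need extra handling not shown here.
import random

def sample_reply(policy, sys_act):
    candidates = policy[sys_act]  # assumed shape: a list of dicts with 'active_prob'
    weights = [c.get('active_prob', 0.0) for c in candidates]
    return random.choices(candidates, weights=weights, k=1)[0]

# e.g. sample_reply(metadata['reply_system_acts'], 'request') would return the
# {'return_acts': ['inform'], ...} definition with probability 0.95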
| [
"[email protected]"
] | |
456864271f3e01f15b001804253e5dd219e0b0b0 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/built-in/nlp/Data2vec_for_PyTorch/fairseq/modules/ema_module.py | 7d3733766779e26a60716e82be4ac0eef6859024 | [
"MIT",
"BSD-3-Clause",
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 8,436 | py | #!/usr/bin/env python3
# coding:utf-8
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
"""
Used for EMA tracking of a given PyTorch module. The user is responsible for
calling step() and setting the appropriate decay.
"""
import copy
from dataclasses import dataclass, field
import logging
import torch
from omegaconf import II
from fairseq.dataclass import FairseqDataclass
try:
from amp_C import multi_tensor_l2norm
multi_tensor_l2norm_available = True
except ImportError:
multi_tensor_l2norm_available = False
logger = logging.getLogger(__name__)
@dataclass
class EMAModuleConfig(FairseqDataclass):
ema_decay: float = field(
default=0.9999, metadata={"help": "decay for exponential moving average model"}
)
ema_fp32: bool = field(
default=False,
metadata={"help": "If true, store EMA model in fp32 even if model is in fp16"},
)
add_missing_params: bool = True
log_norms: bool = False
class EMAModule:
"""Exponential Moving Average of Fairseq Models"""
def __init__(
self,
model,
config: EMAModuleConfig,
copy_model=True,
device=None,
skip_keys=None,
):
"""
@param model model to initialize the EMA with
@param config EMAConfig object with configuration like
ema_decay, ema_update_freq, ema_fp32
@param device If provided, copy EMA to this device (e.g. gpu).
Otherwise EMA is in the same device as the model.
"""
self.config = config
if copy_model:
self.model = copy.deepcopy(model)
self.model.requires_grad_(False)
else:
self.model = model
self.config = config
self.decay = config.ema_decay
self.skip_keys = skip_keys or set()
self.add_missing_params = config.add_missing_params
self.fp32_params = {}
if device is not None:
logging.info(f"Copying EMA model to device {device}")
self.model = self.model.to(device=device)
if self.config.ema_fp32:
self.build_fp32_params()
self.log_norms = config.log_norms and multi_tensor_l2norm_available
self.logs = {}
def build_fp32_params(self, state_dict=None):
"""
Store a copy of the EMA params in fp32.
If a state dict is passed, the EMA params are copied from
the provided state dict. Otherwise, they are copied from the
current EMA model parameters.
"""
if not self.config.ema_fp32:
raise RuntimeError(
"build_fp32_params should not be called if ema_fp32=False. "
"Use ema_fp32=True if this is really intended."
)
if state_dict is None:
state_dict = self.model.state_dict()
def _to_float(t):
return t.float() if torch.is_floating_point(t) else t
for param_key in state_dict:
if param_key in self.fp32_params:
if param_key == "__sq_mom":
self.fp32_params[param_key] = state_dict[param_key]
else:
self.fp32_params[param_key].copy_(state_dict[param_key])
else:
self.fp32_params[param_key] = _to_float(state_dict[param_key])
if "__sq_mom" in self.fp32_params:
self.fp32_params["__sq_mom"][param_key] = torch.zeros_like(
self.fp32_params[param_key]
)
def restore(self, state_dict, build_fp32_params=False):
"""Load data from a model spec into EMA model"""
self.model.load_state_dict(state_dict, strict=False)
if build_fp32_params:
self.build_fp32_params(state_dict)
def set_decay(self, decay, weight_decay=None):
self.decay = decay
if weight_decay is not None:
self.weight_decay = weight_decay
def get_decay(self):
return self.decay
def _step_internal(self, new_model):
"""One update of the EMA model based on new model weights"""
decay = self.decay
ema_state_dict = {}
ema_params = (
self.fp32_params if self.config.ema_fp32 else self.model.state_dict()
)
new_p = []
ema_p = []
for key, param in new_model.named_parameters():
if isinstance(param, dict):
continue
if not self.add_missing_params and key not in ema_params:
continue
try:
ema_param = ema_params[key]
except KeyError:
ema_param = (
param.float().clone() if param.ndim == 1 else copy.deepcopy(param)
)
ema_params[key] = ema_param
if param.shape != ema_param.shape:
raise ValueError(
"incompatible tensor shapes between model param and ema param"
+ "{} vs. {}".format(param.shape, ema_param.shape)
)
if "version" in key:
# Do not decay a model.version pytorch param
continue
lr = 1 - decay
if key in self.skip_keys or not param.requires_grad:
ema_params[key].copy_(param.to(dtype=ema_param.dtype).data)
ema_param = ema_params[key]
else:
if self.log_norms:
new_p.append(param)
ema_p.append(ema_param)
ema_param.mul_(1 - lr)
ema_param.add_(param.data.to(dtype=ema_param.dtype), alpha=lr)
ema_state_dict[key] = ema_param
for key, param in new_model.named_buffers():
ema_state_dict[key] = param
if self.log_norms:
if "model_norm" in self.logs:
self.prev_model_norm = self.logs["model_norm"]
chunk_size = 2048 * 32
has_inf = torch.zeros(
(1, 1), dtype=torch.int, device=next(new_model.parameters()).device
)
new_norm = multi_tensor_l2norm(chunk_size, has_inf, [new_p], False)
old_norm = multi_tensor_l2norm(chunk_size, has_inf, [ema_p], False)
self.logs["model_norm"] = new_norm[0]
self.logs["ema_norm"] = old_norm[0]
self.restore(ema_state_dict, build_fp32_params=False)
@torch.no_grad()
def step(self, new_model):
self._step_internal(new_model)
def reverse(self, model):
"""
Load the model parameters from EMA model.
Useful for inference or fine-tuning from the EMA model.
"""
d = self.model.state_dict()
if "_ema" in d:
del d["_ema"]
model.load_state_dict(d, strict=False)
return model
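# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal sketch, assuming a plain PyTorch training loop; the tiny model,
# optimizer, and decay value below are stand-ins, not values used by this
# repository.
if __name__ == "__main__":
    import torch.nn as nn
    net = nn.Linear(4, 2)
    ema = EMAModule(net, EMAModuleConfig(ema_decay=0.999, ema_fp32=False))
    opt = torch.optim.SGD(net.parameters(), lr=0.1)
    for _ in range(3):
        loss = net(torch.randn(8, 4)).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()
        ema.step(net)  # fold the freshly updated weights into the EMA copy
    net = ema.reverse(net)  # copy the averaged weights back, e.g. for inference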
| [
"[email protected]"
] | |
a26978798c0b897c4e83d5d4870426ae593e1ff7 | 649255f0d9b6d90be3d3f68263680081f893a089 | /swagger_client/api/remediation_api.py | 53d8a3bb84c470a14ec8ad7b083b1ad8a31fc380 | [] | no_license | khantext/r7ivm3 | 611e1bbc988d9eb8fbb53294d3ed488130e46818 | bd9b25f511f9e7479ea7069d71929700bed09e87 | refs/heads/master | 2023-05-01T10:01:16.336656 | 2021-05-03T18:16:12 | 2021-05-03T18:16:12 | 237,514,737 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 54,161 | py | # coding: utf-8
"""
InsightVM API
# Overview This guide documents the InsightVM Application Programming Interface (API) Version 3. This API supports the Representation State Transfer (REST) design pattern. Unless noted otherwise this API accepts and produces the `application/json` media type. This API uses Hypermedia as the Engine of Application State (HATEOAS) and is hypermedia friendly. All API connections must be made to the security console using HTTPS. ## Versioning Versioning is specified in the URL and the base path of this API is: `https://<host>:<port>/api/3/`. ## Specification An <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md\">OpenAPI v2</a> specification (also known as Swagger 2) of this API is available. Tools such as <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://github.com/swagger-api/swagger-codegen\">swagger-codegen</a> can be used to generate an API client in the language of your choosing using this specification document. <p class=\"openapi\">Download the specification: <a class=\"openapi-button\" target=\"_blank\" rel=\"noopener noreferrer\" download=\"\" href=\"/api/3/json\"> Download </a></p> ## Authentication Authorization to the API uses HTTP Basic Authorization (see <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"https://www.ietf.org/rfc/rfc2617.txt\">RFC 2617</a> for more information). Requests must supply authorization credentials in the `Authorization` header using a Base64 encoded hash of `\"username:password\"`. <!-- ReDoc-Inject: <security-definitions> --> ### 2FA This API supports two-factor authentication (2FA) by supplying an authentication token in addition to the Basic Authorization. The token is specified using the `Token` request header. To leverage two-factor authentication, this must be enabled on the console and be configured for the account accessing the API. ## Resources ### Naming Resource names represent nouns and identify the entity being manipulated or accessed. All collection resources are pluralized to indicate to the client they are interacting with a collection of multiple resources of the same type. Singular resource names are used when there exists only one resource available to interact with. The following naming conventions are used by this API: | Type | Case | | --------------------------------------------- | ------------------------ | | Resource names | `lower_snake_case` | | Header, body, and query parameters parameters | `camelCase` | | JSON fields and property names | `camelCase` | #### Collections A collection resource is a parent resource for instance resources, but can itself be retrieved and operated on independently. Collection resources use a pluralized resource name. The resource path for collection resources follow the convention: ``` /api/3/{resource_name} ``` #### Instances An instance resource is a \"leaf\" level resource that may be retrieved, optionally nested within a collection resource. Instance resources are usually retrievable with opaque identifiers. The resource path for instance resources follows the convention: ``` /api/3/{resource_name}/{instance_id}... ``` ## Verbs The following HTTP operations are supported throughout this API. The general usage of the operation and both its failure and success status codes are outlined below. 
| Verb | Usage | Success | Failure | | --------- | ------------------------------------------------------------------------------------- | ----------- | -------------------------------------------------------------- | | `GET` | Used to retrieve a resource by identifier, or a collection of resources by type. | `200` | `400`, `401`, `402`, `404`, `405`, `408`, `410`, `415`, `500` | | `POST` | Creates a resource with an application-specified identifier. | `201` | `400`, `401`, `404`, `405`, `408`, `413`, `415`, `500` | | `POST` | Performs a request to queue an asynchronous job. | `202` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Creates a resource with a client-specified identifier. | `200` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `PUT` | Performs a full update of a resource with a specified identifier. | `201` | `400`, `401`, `403`, `405`, `408`, `410`, `413`, `415`, `500` | | `DELETE` | Deletes a resource by identifier or an entire collection of resources. | `204` | `400`, `401`, `405`, `408`, `410`, `413`, `415`, `500` | | `OPTIONS` | Requests what operations are available on a resource. | `200` | `401`, `404`, `405`, `408`, `500` | ### Common Operations #### OPTIONS All resources respond to the `OPTIONS` request, which allows discoverability of available operations that are supported. The `OPTIONS` response returns the acceptable HTTP operations on that resource within the `Allow` header. The response is always a `200 OK` status. ### Collection Resources Collection resources can support the `GET`, `POST`, `PUT`, and `DELETE` operations. #### GET The `GET` operation invoked on a collection resource indicates a request to retrieve all, or some, of the entities contained within the collection. This also includes the optional capability to filter or search resources during the request. The response from a collection listing is a paginated document. See [hypermedia links](#section/Overview/Paging) for more information. #### POST The `POST` is a non-idempotent operation that allows for the creation of a new resource when the resource identifier is not provided by the system during the creation operation (i.e. the Security Console generates the identifier). The content of the `POST` request is sent in the request body. The response to a successful `POST` request should be a `201 CREATED` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. The `POST` to a collection resource can also be used to interact with asynchronous resources. In this situation, instead of a `201 CREATED` response, the `202 ACCEPTED` response indicates that processing of the request is not fully complete but has been accepted for future processing. This request will respond similarly with a `Location` header with link to the job-oriented asynchronous resource that was created and/or queued. #### PUT The `PUT` is an idempotent operation that either performs a create with user-supplied identity, or a full replace or update of a resource by a known identifier. The response to a `PUT` operation to create an entity is a `201 Created` with a valid `Location` header field set to the URI that can be used to access to the newly created resource. `PUT` on a collection resource replaces all values in the collection. The typical response to a `PUT` operation that updates an entity is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. 
#### DELETE The `DELETE` is an idempotent operation that physically deletes a resource, or removes an association between resources. The typical response to a `DELETE` operation is hypermedia links, which may link to related resources caused by the side-effects of the changes performed. ### Instance Resources Instance resources can support the `GET`, `PUT`, `POST`, `PATCH` and `DELETE` operations. #### GET Retrieves the details of a specific resource by its identifier. The details retrieved can be controlled through property selection and property views. The content of the resource is returned within the body of the response in the acceptable media type. #### PUT Allows for and idempotent \"full update\" (complete replacement) on a specific resource. If the resource does not exist, it will be created; if it does exist, it is completely overwritten. Any omitted properties in the request are assumed to be undefined/null. For \"partial updates\" use `POST` or `PATCH` instead. The content of the `PUT` request is sent in the request body. The identifier of the resource is specified within the URL (not the request body). The response to a successful `PUT` request is a `201 CREATED` to represent the created status, with a valid `Location` header field set to the URI that can be used to access to the newly created (or fully replaced) resource. #### POST Performs a non-idempotent creation of a new resource. The `POST` of an instance resource most commonly occurs with the use of nested resources (e.g. searching on a parent collection resource). The response to a `POST` of an instance resource is typically a `200 OK` if the resource is non-persistent, and a `201 CREATED` if there is a resource created/persisted as a result of the operation. This varies by endpoint. #### PATCH The `PATCH` operation is used to perform a partial update of a resource. `PATCH` is a non-idempotent operation that enforces an atomic mutation of a resource. Only the properties specified in the request are to be overwritten on the resource it is applied to. If a property is missing, it is assumed to not have changed. #### DELETE Permanently removes the individual resource from the system. If the resource is an association between resources, only the association is removed, not the resources themselves. A successful deletion of the resource should return `204 NO CONTENT` with no response body. This operation is not fully idempotent, as follow-up requests to delete a non-existent resource should return a `404 NOT FOUND`. ## Requests Unless otherwise indicated, the default request body media type is `application/json`. ### Headers Commonly used request headers include: | Header | Example | Purpose | | ------------------ | --------------------------------------------- | ---------------------------------------------------------------------------------------------- | | `Accept` | `application/json` | Defines what acceptable content types are allowed by the client. For all types, use `*/*`. | | `Accept-Encoding` | `deflate, gzip` | Allows for the encoding to be specified (such as gzip). | | `Accept-Language` | `en-US` | Indicates to the server the client's locale (defaults `en-US`). | | `Authorization ` | `Basic Base64(\"username:password\")` | Basic authentication | | `Token ` | `123456` | Two-factor authentication token (if enabled) | ### Dates & Times Dates and/or times are specified as strings in the ISO 8601 format(s). 
The following formats are supported as input: | Value | Format | Notes | | --------------------------- | ------------------------------------------------------ | ----------------------------------------------------- | | Date | YYYY-MM-DD | Defaults to 12 am UTC (if used for a date & time | | Date & time only | YYYY-MM-DD'T'hh:mm:ss[.nnn] | Defaults to UTC | | Date & time in UTC | YYYY-MM-DD'T'hh:mm:ss[.nnn]Z | | | Date & time w/ offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm | | | Date & time w/ zone-offset | YYYY-MM-DD'T'hh:mm:ss[.nnn][+|-]hh:mm[<zone-id>] | | ### Timezones Timezones are specified in the regional zone format, such as `\"America/Los_Angeles\"`, `\"Asia/Tokyo\"`, or `\"GMT\"`. ### Paging Pagination is supported on certain collection resources using a combination of two query parameters, `page` and `size`. As these are control parameters, they are prefixed with the underscore character. The page parameter dictates the zero-based index of the page to retrieve, and the `size` indicates the size of the page. For example, `/resources?page=2&size=10` will return page 3, with 10 records per page, giving results 21-30. The maximum page size for a request is 500. ### Sorting Sorting is supported on paginated resources with the `sort` query parameter(s). The sort query parameter(s) supports identifying a single or multi-property sort with a single or multi-direction output. The format of the parameter is: ``` sort=property[,ASC|DESC]... ``` Therefore, the request `/resources?sort=name,title,DESC` would return the results sorted by the name and title descending, in that order. The sort directions are either ascending `ASC` or descending `DESC`. With single-order sorting, all properties are sorted in the same direction. To sort the results with varying orders by property, multiple sort parameters are passed. For example, the request `/resources?sort=name,ASC&sort=title,DESC` would sort by name ascending and title descending, in that order. ## Responses The following response statuses may be returned by this API. | Status | Meaning | Usage | | ------ | ------------------------ |------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `200` | OK | The operation performed without error according to the specification of the request, and no more specific 2xx code is suitable. | | `201` | Created | A create request has been fulfilled and a resource has been created. The resource is available as the URI specified in the response, including the `Location` header. | | `202` | Accepted | An asynchronous task has been accepted, but not guaranteed, to be processed in the future. | | `400` | Bad Request | The request was invalid or cannot be otherwise served. The request is not likely to succeed in the future without modifications. | | `401` | Unauthorized | The user is unauthorized to perform the operation requested, or does not maintain permissions to perform the operation on the resource specified. | | `403` | Forbidden | The resource exists to which the user has access, but the operating requested is not permitted. | | `404` | Not Found | The resource specified could not be located, does not exist, or an unauthenticated client does not have permissions to a resource. | | `405` | Method Not Allowed | The operations may not be performed on the specific resource. Allowed operations are returned and may be performed on the resource. 
| | `408` | Request Timeout | The client has failed to complete a request in a timely manner and the request has been discarded. | | `413` | Request Entity Too Large | The request being provided is too large for the server to accept processing. | | `415` | Unsupported Media Type | The media type is not supported for the requested resource. | | `500` | Internal Server Error | An internal and unexpected error has occurred on the server at no fault of the client. | ### Security The response statuses 401, 403 and 404 need special consideration for security purposes. As necessary, error statuses and messages may be obscured to strengthen security and prevent information exposure. The following is a guideline for privileged resource response statuses: | Use Case | Access | Resource | Permission | Status | | ------------------------------------------------------------------ | ------------------ |------------------- | ------------ | ------------ | | Unauthenticated access to an unauthenticated resource. | Unauthenticated | Unauthenticated | Yes | `20x` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Authenticated | No | `401` | | Unauthenticated access to an authenticated resource. | Unauthenticated | Non-existent | No | `401` | | Authenticated access to a unauthenticated resource. | Authenticated | Unauthenticated | Yes | `20x` | | Authenticated access to an authenticated, unprivileged resource. | Authenticated | Authenticated | No | `404` | | Authenticated access to an authenticated, privileged resource. | Authenticated | Authenticated | Yes | `20x` | | Authenticated access to an authenticated, non-existent resource | Authenticated | Non-existent | Yes | `404` | ### Headers Commonly used response headers include: | Header | Example | Purpose | | -------------------------- | --------------------------------- | --------------------------------------------------------------- | | `Allow` | `OPTIONS, GET` | Defines the allowable HTTP operations on a resource. | | `Cache-Control` | `no-store, must-revalidate` | Disables caching of resources (as they are all dynamic). | | `Content-Encoding` | `gzip` | The encoding of the response body (if any). | | `Location` | | Refers to the URI of the resource created by a request. | | `Transfer-Encoding` | `chunked` | Specified the encoding used to transform response. | | `Retry-After` | 5000 | Indicates the time to wait before retrying a request. | | `X-Content-Type-Options` | `nosniff` | Disables MIME type sniffing. | | `X-XSS-Protection` | `1; mode=block` | Enables XSS filter protection. | | `X-Frame-Options` | `SAMEORIGIN` | Prevents rendering in a frame from a different origin. | | `X-UA-Compatible` | `IE=edge,chrome=1` | Specifies the browser mode to render in. | ### Format When `application/json` is returned in the response body it is always pretty-printed (indented, human readable output). Additionally, gzip compression/encoding is supported on all responses. #### Dates & Times Dates or times are returned as strings in the ISO 8601 'extended' format. When a date and time is returned (instant) the value is converted to UTC. For example: | Value | Format | Example | | --------------- | ------------------------------ | --------------------- | | Date | `YYYY-MM-DD` | 2017-12-03 | | Date & Time | `YYYY-MM-DD'T'hh:mm:ss[.nnn]Z` | 2017-12-03T10:15:30Z | #### Content In some resources a Content data type is used. This allows for multiple formats of representation to be returned within resource, specifically `\"html\"` and `\"text\"`. 
The `\"text\"` property returns a flattened representation suitable for output in textual displays. The `\"html\"` property returns an HTML fragment suitable for display within an HTML element. Note, the HTML returned is not a valid stand-alone HTML document. #### Paging The response to a paginated request follows the format: ```json { resources\": [ ... ], \"page\": { \"number\" : ..., \"size\" : ..., \"totalResources\" : ..., \"totalPages\" : ... }, \"links\": [ \"first\" : { \"href\" : \"...\" }, \"prev\" : { \"href\" : \"...\" }, \"self\" : { \"href\" : \"...\" }, \"next\" : { \"href\" : \"...\" }, \"last\" : { \"href\" : \"...\" } ] } ``` The `resources` property is an array of the resources being retrieved from the endpoint, each which should contain at minimum a \"self\" relation hypermedia link. The `page` property outlines the details of the current page and total possible pages. The object for the page includes the following properties: - number - The page number (zero-based) of the page returned. - size - The size of the pages, which is less than or equal to the maximum page size. - totalResources - The total amount of resources available across all pages. - totalPages - The total amount of pages. The last property of the paged response is the `links` array, which contains all available hypermedia links. For paginated responses, the \"self\", \"next\", \"previous\", \"first\", and \"last\" links are returned. The \"self\" link must always be returned and should contain a link to allow the client to replicate the original request against the collection resource in an identical manner to that in which it was invoked. The \"next\" and \"previous\" links are present if either or both there exists a previous or next page, respectively. The \"next\" and \"previous\" links have hrefs that allow \"natural movement\" to the next page, that is all parameters required to move the next page are provided in the link. The \"first\" and \"last\" links provide references to the first and last pages respectively. Requests outside the boundaries of the pageable will result in a `404 NOT FOUND`. Paginated requests do not provide a \"stateful cursor\" to the client, nor does it need to provide a read consistent view. Records in adjacent pages may change while pagination is being traversed, and the total number of pages and resources may change between requests within the same filtered/queries resource collection. #### Property Views The \"depth\" of the response of a resource can be configured using a \"view\". All endpoints supports two views that can tune the extent of the information returned in the resource. The supported views are `summary` and `details` (the default). View are specified using a query parameter, in this format: ```bash /<resource>?view={viewName} ``` #### Error Any error responses can provide a response body with a message to the client indicating more information (if applicable) to aid debugging of the error. All 40x and 50x responses will return an error response in the body. The format of the response is as follows: ```json { \"status\": <statusCode>, \"message\": <message>, \"links\" : [ { \"rel\" : \"...\", \"href\" : \"...\" } ] } ``` The `status` property is the same as the HTTP status returned in the response, to ease client parsing. The message property is a localized message in the request client's locale (if applicable) that articulates the nature of the error. The last property is the `links` property. 
This may contain additional [hypermedia links](#section/Overview/Authentication) to troubleshoot. #### Search Criteria <a section=\"section/Responses/SearchCriteria\"></a> Multiple resources make use of search criteria to match assets. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": <value>,] [\"lower\": <value>,] [\"upper\": <value>] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The operator is a type and property-specific operating performed on the filtered property. The valid values for fields and operators are outlined in the table below. Depending on the data type of the operator the value may be a numeric or string format. Every filter also defines one or more values that are supplied to the operator. The valid values vary by operator and are outlined below. ##### Fields The following table outlines the search criteria fields and the available operators: | Field | Operators | | --------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | | `alternate-address-type` | `in` | | `container-image` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-like` `not-like` | | `container-status` | `is` `is-not` | | `containers` | `are` | | `criticality-tag` | `is` `is-not` `is-greater-than` `is-less-than` `is-applied` ` is-not-applied` | | `custom-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `cve` | `is` `is-not` `contains` `does-not-contain` | | `cvss-access-complexity` | `is` `is-not` | | `cvss-authentication-required` | `is` `is-not` | | `cvss-access-vector` | `is` `is-not` | | `cvss-availability-impact` | `is` `is-not` | | `cvss-confidentiality-impact` | `is` `is-not` | | `cvss-integrity-impact` | `is` `is-not` | | `cvss-v3-confidentiality-impact` | `is` `is-not` | | `cvss-v3-integrity-impact` | `is` `is-not` | | `cvss-v3-availability-impact` | `is` `is-not` | | `cvss-v3-attack-vector` | `is` `is-not` | | `cvss-v3-attack-complexity` | `is` `is-not` | | `cvss-v3-user-interaction` | `is` `is-not` | | `cvss-v3-privileges-required` | `is` `is-not` | | `host-name` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-empty` `is-not-empty` `is-like` `not-like` | | `host-type` | `in` `not-in` | | `ip-address` | `is` `is-not` `in-range` `not-in-range` `is-like` `not-like` | | `ip-address-type` | `in` `not-in` | | `last-scan-date` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `location-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `mobile-device-last-sync-time` | `is-within-the-last` `is-earlier-than` | | `open-ports` | `is` `is-not` ` in-range` | | `operating-system` | `contains` ` does-not-contain` ` is-empty` ` is-not-empty` | | `owner-tag` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` `is-applied` `is-not-applied` | | `pci-compliance` | `is` | | `risk-score` | `is` `is-not` `is-greater-than` `is-less-than` `in-range` | | `service-name` | `contains` `does-not-contain` | | `site-id` | `in` `not-in` | | `software` | `contains` `does-not-contain` | | `vAsset-cluster` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | 
`vAsset-datacenter` | `is` `is-not` | | `vAsset-host-name` | `is` `is-not` `contains` `does-not-contain` `starts-with` | | `vAsset-power-state` | `in` `not-in` | | `vAsset-resource-pool-path` | `contains` `does-not-contain` | | `vulnerability-assessed` | `is-on-or-before` `is-on-or-after` `is-between` `is-earlier-than` `is-within-the-last` | | `vulnerability-category` | `is` `is-not` `starts-with` `ends-with` `contains` `does-not-contain` | | `vulnerability-cvss-v3-score` | `is` `is-not` | | `vulnerability-cvss-score` | `is` `is-not` `in-range` `is-greater-than` `is-less-than` | | `vulnerability-exposures` | `includes` `does-not-include` | | `vulnerability-title` | `contains` `does-not-contain` `is` `is-not` `starts-with` `ends-with` | | `vulnerability-validated-status` | `are` | ##### Enumerated Properties The following fields have enumerated values: | Field | Acceptable Values | | ----------------------------------------- | ------------------------------------------------------------------------------------------------------------- | | `alternate-address-type` | 0=IPv4, 1=IPv6 | | `containers` | 0=present, 1=not present | | `container-status` | `created` `running` `paused` `restarting` `exited` `dead` `unknown` | | `cvss-access-complexity` | <ul><li><code>L</code> = Low</li><li><code>M</code> = Medium</li><li><code>H</code> = High</li></ul> | | `cvss-integrity-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-confidentiality-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-availability-impact` | <ul><li><code>N</code> = None</li><li><code>P</code> = Partial</li><li><code>C</code> = Complete</li></ul> | | `cvss-access-vector` | <ul><li><code>L</code> = Local</li><li><code>A</code> = Adjacent</li><li><code>N</code> = Network</li></ul> | | `cvss-authentication-required` | <ul><li><code>N</code> = None</li><li><code>S</code> = Single</li><li><code>M</code> = Multiple</li></ul> | | `cvss-v3-confidentiality-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-integrity-impact` | <ul><li><code>L</code> = Local</li><li><code>L</code> = Low</li><li><code>N</code> = None</li><li><code>H</code> = High</li></ul> | | `cvss-v3-availability-impact` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-attack-vector` | <ul><li><code>N</code> = Network</li><li><code>A</code> = Adjacent</li><li><code>L</code> = Local</li><li><code>P</code> = Physical</li></ul> | | `cvss-v3-attack-complexity` | <ul><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `cvss-v3-user-interaction` | <ul><li><code>N</code> = None</li><li><code>R</code> = Required</li></ul> | | `cvss-v3-privileges-required` | <ul><li><code>N</code> = None</li><li><code>L</code> = Low</li><li><code>H</code> = High</li></ul> | | `host-type` | 0=Unknown, 1=Guest, 2=Hypervisor, 3=Physical, 4=Mobile | | `ip-address-type` | 0=IPv4, 1=IPv6 | | `pci-compliance` | 0=fail, 1=pass | | `vulnerability-validated-status` | 0=present, 1=not present | ##### Operator Properties <a section=\"section/Responses/SearchCriteria/OperatorProperties\"></a> The following table outlines which properties are required for each operator and the appropriate data type(s): | Operator | `value` | `lower` | `upper` | | 
----------------------|-----------------------|-----------------------|------------------------| | `are` | `string` | | | | `contains` | `string` | | | | `does-not-contain` | `string` | | | | `ends with` | `string` | | | | `in` | `Array[ string ]` | | | | `in-range` | | `numeric` | `numeric` | | `includes` | `Array[ string ]` | | | | `is` | `string` | | | | `is-applied` | | | | | `is-between` | | `string` (yyyy-MM-dd) | `numeric` (yyyy-MM-dd) | | `is-earlier-than` | `numeric` (days) | | | | `is-empty` | | | | | `is-greater-than` | `numeric` | | | | `is-on-or-after` | `string` (yyyy-MM-dd) | | | | `is-on-or-before` | `string` (yyyy-MM-dd) | | | | `is-not` | `string` | | | | `is-not-applied` | | | | | `is-not-empty` | | | | | `is-within-the-last` | `numeric` (days) | | | | `less-than` | `string` | | | | `like` | `string` | | | | `not-contains` | `string` | | | | `not-in` | `Array[ string ]` | | | | `not-in-range` | | `numeric` | `numeric` | | `not-like` | `string` | | | | `starts-with` | `string` | | | #### Discovery Connection Search Criteria <a section=\"section/Responses/DiscoverySearchCriteria\"></a> Dynamic sites make use of search criteria to match assets from a discovery connection. Search criteria is an array of search filters. Each search filter has a generic format of: ```json { \"field\": \"<field-name>\", \"operator\": \"<operator>\", [\"value\": \"<value>\",] [\"lower\": \"<value>\",] [\"upper\": \"<value>\"] } ``` Every filter defines two required properties `field` and `operator`. The field is the name of an asset property that is being filtered on. The list of supported fields vary depending on the type of discovery connection configured for the dynamic site (e.g vSphere, ActiveSync, etc.). The operator is a type and property-specific operating performed on the filtered property. The valid values for fields outlined in the tables below and are grouped by the type of connection. Every filter also defines one or more values that are supplied to the operator. See <a href=\"#section/Responses/SearchCriteria/OperatorProperties\">Search Criteria Operator Properties</a> for more information on the valid values for each operator. ##### Fields (ActiveSync) This section documents search criteria information for ActiveSync discovery connections. The discovery connections must be one of the following types: `\"activesync-ldap\"`, `\"activesync-office365\"`, or `\"activesync-powershell\"`. The following table outlines the search criteria fields and the available operators for ActiveSync connections: | Field | Operators | | --------------------------------- | ------------------------------------------------------------- | | `last-sync-time` | `is-within-the-last` ` is-earlier-than` | | `operating-system` | `contains` ` does-not-contain` | | `user` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (AWS) This section documents search criteria information for AWS discovery connections. The discovery connections must be the type `\"aws\"`. 
The following table outlines the search criteria fields and the available operators for AWS connections: | Field | Operators | | ----------------------- | ------------------------------------------------------------- | | `availability-zone` | `contains` ` does-not-contain` | | `guest-os-family` | `contains` ` does-not-contain` | | `instance-id` | `contains` ` does-not-contain` | | `instance-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `instance-state` | `in` ` not-in` | | `instance-type` | `in` ` not-in` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `region` | `in` ` not-in` | | `vpc-id` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (DHCP) This section documents search criteria information for DHCP discovery connections. The discovery connections must be the type `\"dhcp\"`. The following table outlines the search criteria fields and the available operators for DHCP connections: | Field | Operators | | --------------- | ------------------------------------------------------------- | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `mac-address` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Fields (Sonar) This section documents search criteria information for Sonar discovery connections. The discovery connections must be the type `\"sonar\"`. The following table outlines the search criteria fields and the available operators for Sonar connections: | Field | Operators | | ------------------- | -------------------- | | `search-domain` | `contains` ` is` | | `ip-address` | `in-range` ` is` | | `sonar-scan-date` | `is-within-the-last` | ##### Fields (vSphere) This section documents search criteria information for vSphere discovery connections. The discovery connections must be the type `\"vsphere\"`. The following table outlines the search criteria fields and the available operators for vSphere connections: | Field | Operators | | -------------------- | ------------------------------------------------------------------------------------------ | | `cluster` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `data-center` | `is` ` is-not` | | `discovered-time` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `guest-os-family` | `contains` ` does-not-contain` | | `host-name` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | | `ip-address` | `in-range` ` not-in-range` ` is` ` is-not` | | `power-state` | `in` ` not-in` | | `resource-pool-path` | `contains` ` does-not-contain` | | `last-time-seen` | `is-on-or-before` ` is-on-or-after` ` is-between` ` is-earlier-than` ` is-within-the-last` | | `vm` | `is` ` is-not` ` contains` ` does-not-contain` ` starts-with` | ##### Enumerated Properties (vSphere) The following fields have enumerated values: | Field | Acceptable Values | | ------------- | ------------------------------------ | | `power-state` | `poweredOn` `poweredOff` `suspended` | ## HATEOAS This API follows Hypermedia as the Engine of Application State (HATEOAS) principals and is therefore hypermedia friendly. Hyperlinks are returned in the `links` property of any given resource and contain a fully-qualified hyperlink to the corresponding resource. 
The format of the hypermedia link adheres to both the <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org\">{json:api} v1</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://jsonapi.org/format/#document-links\">\"Link Object\"</a> and <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html\">JSON Hyper-Schema</a> <a target=\"_blank\" rel=\"noopener noreferrer\" href=\"http://json-schema.org/latest/json-schema-hypermedia.html#rfc.section.5.2\">\"Link Description Object\"</a> formats. For example: ```json \"links\": [{ \"rel\": \"<relation>\", \"href\": \"<href>\" ... }] ``` Where appropriate link objects may also contain additional properties than the `rel` and `href` properties, such as `id`, `type`, etc. See the [Root](#tag/Root) resources for the entry points into API discovery. # noqa: E501
OpenAPI spec version: 3
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class RemediationApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_asset_vulnerability_solutions(self, id, vulnerability_id, **kwargs): # noqa: E501
"""Asset Vulnerability Solution # noqa: E501
Returns the highest-superceding rollup solutions for a vulnerability on an asset. The solution(s) selected will be the most recent and cost-effective means by which the vulnerability can be remediated. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_asset_vulnerability_solutions(id, vulnerability_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the asset. (required)
:param str vulnerability_id: The identifier of the vulnerability. (required)
:return: ResourcesMatchedSolution
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_asset_vulnerability_solutions_with_http_info(id, vulnerability_id, **kwargs) # noqa: E501
else:
(data) = self.get_asset_vulnerability_solutions_with_http_info(id, vulnerability_id, **kwargs) # noqa: E501
return data
def get_asset_vulnerability_solutions_with_http_info(self, id, vulnerability_id, **kwargs): # noqa: E501
"""Asset Vulnerability Solution # noqa: E501
Returns the highest-superceding rollup solutions for a vulnerability on an asset. The solution(s) selected will be the most recent and cost-effective means by which the vulnerability can be remediated. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_asset_vulnerability_solutions_with_http_info(id, vulnerability_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: The identifier of the asset. (required)
:param str vulnerability_id: The identifier of the vulnerability. (required)
:return: ResourcesMatchedSolution
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'vulnerability_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_asset_vulnerability_solutions" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_asset_vulnerability_solutions`") # noqa: E501
# verify the required parameter 'vulnerability_id' is set
if ('vulnerability_id' not in params or
params['vulnerability_id'] is None):
raise ValueError("Missing the required parameter `vulnerability_id` when calling `get_asset_vulnerability_solutions`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
if 'vulnerability_id' in params:
path_params['vulnerabilityId'] = params['vulnerability_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json;charset=UTF-8']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/3/assets/{id}/vulnerabilities/{vulnerabilityId}/solution', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ResourcesMatchedSolution', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| [
"[email protected]"
] | |
1941daf3147a52db83a326417991ec09c645959f | d90283bff72b5a55dd4d0f90c7325355b00ce7b1 | /p1804/p10/打印整数.py | 915fda83f4b58c9a3908ce840830515fba53fb09 | [] | no_license | yuemeiss/p1804daima | f841f52e63081d53d50a199e4d148d4533605bb6 | 6ea08eb9971e42bf4ac535033a006d98ed98bf98 | refs/heads/master | 2020-03-15T23:29:59.691297 | 2018-08-06T02:42:49 | 2018-08-06T02:42:49 | 132,395,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | for i in range(1,5000):
    if i % 5 == 0 and i % 7 == 0:
        print("Divisible by both 5 and 7: %d" % i)
| [
"[email protected]"
] | |
49de7e6ce41f348e586e2eefc9b9a5e0127f92ad | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03574/s538402697.py | a100b6d62d5fdc1b9953e127ac04d0761a0d8b81 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | h,w=map(int,input().split())
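# For every empty cell, count how many of its 8 neighbours contain '#';
# the grid gets a one-cell '.' border so no bounds checks are needed.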
s=["."*(w+2)]
for i in range(h):
s.append("."+input()+".")
s.append("."*(w+2))
dx=[-1,0,1,1,1,0,-1,-1]
dy=[1,1,1,0,-1,-1,-1,0]
ans=[]
for i in range(1,h+1):
wp=""
for j in range(1,w+1):
if s[i][j]=="#":
wp+="#"
continue
count=0
for k in range(8):
if s[i+dy[k]][j+dx[k]]=="#":
count+=1
wp+=str(count)
ans.append(wp)
print(*ans,sep="\n") | [
"[email protected]"
] | |
6099e986b2054b690030adc9e7e17a767ae0e2b4 | c6fa248ec5a7e3c67afac98e365cac850c511473 | /generative_adversarial_networks/code/chapter_08/04_train_discriminator.py | c79e832de127b1bae5f94a1889e27d01ecef99ac | [] | no_license | shenjnigxing/deep-learning-material | 44830e07cc2a5bd47b07ca903c1f2b65beef22bb | 24dfee3b9fe1a40303cb2dfe256028d35113babf | refs/heads/master | 2022-12-23T10:08:05.881432 | 2020-09-16T02:24:38 | 2020-09-16T02:24:38 | 295,900,907 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,988 | py | # example of training the discriminator model on real and random cifar10 images
from numpy import ones
from numpy import zeros
from numpy.random import rand
from numpy.random import randint
from keras.datasets.cifar10 import load_data
from keras.optimizers import Adam
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv2D
from keras.layers import Flatten
from keras.layers import Dropout
from keras.layers import LeakyReLU
# define the standalone discriminator model
def define_discriminator(in_shape=(32,32,3)):
model = Sequential()
# normal
model.add(Conv2D(64, (3,3), padding='same', input_shape=in_shape))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(256, (3,3), strides=(2,2), padding='same'))
model.add(LeakyReLU(alpha=0.2))
# classifier
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1, activation='sigmoid'))
# compile model
opt = Adam(lr=0.0002, beta_1=0.5)
model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])
return model
# load and prepare cifar10 training images
def load_real_samples():
# load cifar10 dataset
(trainX, _), (_, _) = load_data()
# convert from unsigned ints to floats
X = trainX.astype('float32')
# scale from [0,255] to [-1,1]
X = (X - 127.5) / 127.5
return X
# select real samples
def generate_real_samples(dataset, n_samples):
# choose random instances
ix = randint(0, dataset.shape[0], n_samples)
# retrieve selected images
X = dataset[ix]
# generate 'real' class labels (1)
y = ones((n_samples, 1))
return X, y
# generate n fake samples with class labels
def generate_fake_samples(n_samples):
# generate uniform random numbers in [0,1]
X = rand(32 * 32 * 3 * n_samples)
# update to have the range [-1, 1]
X = -1 + X * 2
# reshape into a batch of color images
X = X.reshape((n_samples, 32, 32, 3))
# generate 'fake' class labels (0)
y = zeros((n_samples, 1))
return X, y
# train the discriminator model
def train_discriminator(model, dataset, n_iter=20, n_batch=128):
half_batch = int(n_batch / 2)
# manually enumerate epochs
for i in range(n_iter):
# get randomly selected 'real' samples
X_real, y_real = generate_real_samples(dataset, half_batch)
# update discriminator on real samples
_, real_acc = model.train_on_batch(X_real, y_real)
# generate 'fake' examples
X_fake, y_fake = generate_fake_samples(half_batch)
# update discriminator on fake samples
_, fake_acc = model.train_on_batch(X_fake, y_fake)
# summarize performance
print('>%d real=%.0f%% fake=%.0f%%' % (i+1, real_acc*100, fake_acc*100))
# define the discriminator model
model = define_discriminator()
# load image data
dataset = load_real_samples()
# fit the model
train_discriminator(model, dataset) | [
"[email protected]"
] | |
e98140b6ab4a78c3e01cdec72713cc4484a5594a | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc118/A/4965258.py | 809490a42b13f7fe626c23248f25ee09f7538d58 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | a, b = map(int, input().split())
if b % a == 0:
print(a + b)
else:
print(b - a) | [
"[email protected]"
] | |
4cd79181130987db75faf7e250e83b9863e339bb | 5d6dd782e0b29817b3c27d5d6984909152813444 | /dbbase/urls.py | 3d183271c6790a11b27359533230ad4817dbcaab | [] | no_license | smartslee/hospacc | 387d8a7e42e068080738e365045a23d6d8a1f222 | 5bd42a9e729f3c90ff4b87185167f64fe79aac01 | refs/heads/master | 2020-04-01T12:59:50.743213 | 2019-10-07T08:13:41 | 2019-10-07T08:13:41 | 153,232,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,111 | py | from django.urls import path
from . import views
from .views import (HdbUpdateView, IndexView, SearchFormView, HdbCreateView,
                    HdbDeleteView, HdbprintView)
urlpatterns = [
path('list/', views.index, name ='list'),
# url(r'^dbedit/', views.hospdb_list, name ='edit'),
path('input/', views.inputdb, name ='inputdbn'),
path('', views.homep, name ='home'),
path('dblistView/', views.IndexView.as_view(), name ='indexview'),
path('<int:pk>/', views.HdbdetailView.as_view(), name="detail"),
path('print(<int:pk>)/', views.HdbprintView.as_view(), name="print"),
path('hdb/add/', views.HdbCreateView.as_view(), name="hdb_add"),
path('update/<int:pk>/', HdbUpdateView.as_view(), name='update'),
path('delete/<int:pk>/', HdbDeleteView.as_view(), name='delete'),
#url(r'^list$',ProductListView.as_view(), name="ProductListView"),
# url(r'^list/(?P<pk>\d+)/$',ProductDetailView.as_view(), name="ProductDetailview"),
path('search',SearchFormView.as_view(),name='search'),
path('login/', views.signin, name='login'),
path('logout/', views.logout, name='logout'),
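    # Usage sketch: these names resolve with django.urls.reverse, e.g.
    # reverse('update', kwargs={'pk': 1}) -> '<prefix>/update/1/'
    # (the actual prefix depends on where this URLconf is included).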
] | [
"[email protected]"
] | |
a76256e5c53a0f726234358d2eeec7cce0cde04f | 06ab66fe85631fb8e0351245af483b3a8e98295b | /src/config/logger.py | a708dd302034317cdf2dbf836a63869ed4a63415 | [] | no_license | SeanCherngTW/toy-real-time-bidding-buyer | ed62d8e60f196bff06ad69765f7ae8e711b66ea1 | 82e09598649d2ffd4aecc6356257fa3c5a0504ea | refs/heads/main | 2023-06-12T18:19:07.445796 | 2021-07-05T14:16:40 | 2021-07-05T14:16:40 | 383,154,896 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,239 | py | import os
import logging
from logging import handlers
class DebugLog(object):
def __init__(self, ad_path_config):
self.model_name = ad_path_config['model_name']
self.log_file_path = ad_path_config['log_file_path'] + self.model_name + ".log"
self.dst_dir = ad_path_config['dst_dir']
self.prepare_log_path()
self.logger = self.logger_initialize()
self.logger.propagate = False
def prepare_log_path(self):
if not os.path.exists(self.dst_dir):
os.mkdir(self.dst_dir)
def logger_initialize(self):
logger = logging.getLogger(self.model_name)
logger.setLevel(logging.INFO)
formatter = logging.Formatter(
'[%(asctime)s] - [%(name)s] - [%(filename)s] - %(levelname)s - %(message)s'
)
fh = handlers.RotatingFileHandler(
filename=self.log_file_path,
backupCount=1,
encoding="utf-8",
)
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
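

if __name__ == "__main__":
    # Minimal usage sketch: the config keys mirror those read in __init__;
    # the paths below are illustrative assumptions, not project defaults.
    demo_cfg = {
        "model_name": "demo",
        "log_file_path": "./logs/",
        "dst_dir": "./logs",
    }
    DebugLog(demo_cfg).logger.info("logger initialized")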
| [
"[email protected]"
] | |
773bd8d5905ffdfbfc401c174598d1d6aa238f05 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/3/hby.py | d2745b4aff60063d0c31bd24fb6ac64149e7e987 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'hBY':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
829a60803827790a24c17e21c99521fc7746dd54 | bae7e3b6cdfd6f354b79dbc849c1969a46aed586 | /hiAPP/plot_linkage_matrix.py | 62439c042f1b38aa4eb1a36072056960d65d5d01 | [
"MIT"
] | permissive | jmborr/LDRDSANS | 7f6b8ef44db3b93972ae9bff08a641067c19bae1 | b8081ecb78da46a530d61efd3cb6764f3b17b567 | refs/heads/master | 2021-07-24T23:49:38.271100 | 2017-11-05T22:36:40 | 2017-11-05T22:36:40 | 71,494,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py | # -*- coding: utf-8 -*-
"""
Matplotlib of the dendogram associated with the linkage matrix.
Thanks to Jorn's Blog
<https://joernhees.de/blog/2015/08/26/scipy-hierarchical-clustering-and-dendrogram-tutorial/>
"""
# needed imports
from matplotlib import pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
import numpy as np
import argparse
import sys
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plots a dendogram from a scipy.cluster.hierarchy linkage matrix.")
parser.add_argument("linkage", type=str, help="linkage matrix file, output from fpch2scph.py")
parser.add_argument("--p", type=int, default=10, help="show only the last p merged clusters")
args=parser.parse_args()
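    # Invocation sketch (the linkage file name is an assumption):
    #   python plot_linkage_matrix.py linkage.dat --p 20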
Z=np.loadtxt(args.linkage)
plt.title('Hierarchical Clustering Dendrogram (truncated)')
plt.xlabel('sample index')
plt.ylabel('RMSD (Angstroms)')
dendrogram(
Z,
truncate_mode='lastp', # show only the last p merged clusters
p=args.p, # show only the last p merged clusters
show_leaf_counts=False, # otherwise numbers in brackets are counts
leaf_rotation=90.,
leaf_font_size=12.,
show_contracted=True, # to get a distribution impression in truncated branches
)
plt.show()
sys.exit(0)
| [
"[email protected]"
] | |
ce67d5e4cbc106774ba02c02cb38b2fa7b165403 | b01eee55884e21412a1812593996a0d9156e20bc | /cipp/x64assembler/instructions/push_reg.py | d3c6d03e68af5bf12c7f9965096d230b1733a50b | [] | no_license | JacquesLucke/cipp | 46bdb7eebaeb863f424c92542ea56b49b5f0fe2e | d4f38fd1fc84aed9cbf49b85bf6c4b96f2561f71 | refs/heads/master | 2021-10-27T18:29:23.288884 | 2019-04-18T15:36:52 | 2019-04-18T15:36:52 | 123,611,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 761 | py | from .. bits import Bits
from .. block import Instruction
class PushRegInstr(Instruction):
def __init__(self, reg):
assert reg.size in (16, 64)
self.reg = reg
def toIntelSyntax(self):
return f"push {self.reg.name}"
def toMachineCode(self):
if self.reg.size == 64:
return self.toMachineCode_64()
elif self.reg.size == 16:
return self.toMachineCode_16()
else:
raise Exception()
def toMachineCode_64(self):
prefix = Bits.fromHex("" if self.reg.group == 0 else "41")
opcode = Bits.fromHexAndOffset("50", self.reg.number)
return prefix + opcode
def toMachineCode_16(self):
return Bits.fromHex("66") + self.toMachineCode_64()
| [
"[email protected]"
] | |
0054ca5cde322d97a8151893ce49bbc4034e3353 | 130a98632d2ab4c171503b79e455b7aa27a1dda4 | /models/research/object_detection/models/ssd_feature_extractor_test.py | 29c43e376c6167b61a256eb0812ee4d3bcee3ed5 | [
"Apache-2.0",
"MIT"
] | permissive | aboerzel/German_License_Plate_Recognition | d7fc0314295f5cf0c9d7ae9c93a795e3ef1c5787 | 6fc53292b1d3ce3c0340ce724c2c11c77e663d27 | refs/heads/master | 2023-01-30T18:08:37.339542 | 2023-01-07T07:41:36 | 2023-01-07T07:41:36 | 245,586,430 | 34 | 12 | MIT | 2023-01-07T07:41:37 | 2020-03-07T07:16:51 | Python | UTF-8 | Python | false | false | 9,695 | py | # Lint as: python2, python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base test class SSDFeatureExtractors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from abc import abstractmethod
import numpy as np
from six.moves import zip
import tensorflow.compat.v1 as tf
import tf_slim as slim
from google.protobuf import text_format
from object_detection.builders import hyperparams_builder
from object_detection.protos import hyperparams_pb2
from object_detection.utils import test_case
from object_detection.utils import test_utils
class SsdFeatureExtractorTestBase(test_case.TestCase):
def _build_conv_hyperparams(self, add_batch_norm=True):
conv_hyperparams = hyperparams_pb2.Hyperparams()
conv_hyperparams_text_proto = """
activation: RELU_6
regularizer {
l2_regularizer {
}
}
initializer {
truncated_normal_initializer {
}
}
"""
if add_batch_norm:
batch_norm_proto = """
batch_norm {
scale: false
}
"""
conv_hyperparams_text_proto += batch_norm_proto
text_format.Merge(conv_hyperparams_text_proto, conv_hyperparams)
return hyperparams_builder.KerasLayerHyperparams(conv_hyperparams)
def conv_hyperparams_fn(self):
with slim.arg_scope([]) as sc:
return sc
@abstractmethod
def _create_feature_extractor(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
"""Constructs a new feature extractor.
Args:
depth_multiplier: float depth multiplier for feature extractor
pad_to_multiple: the nearest multiple to zero pad the input height and
width dimensions to.
use_explicit_padding: use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
num_layers: number of SSD layers.
use_keras: if True builds a keras-based feature extractor, if False builds
a slim-based one.
use_depthwise: Whether to use depthwise convolutions.
Returns:
an ssd_meta_arch.SSDFeatureExtractor or an
ssd_meta_arch.SSDKerasFeatureExtractor object.
"""
pass
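
  # Subclass sketch (illustrative only; the extractor module/class named
  # below is hypothetical, not part of this repository):
  #
  #   class MyFeatureExtractorTest(SsdFeatureExtractorTestBase):
  #
  #     def _create_feature_extractor(self, depth_multiplier, pad_to_multiple,
  #                                   use_explicit_padding=False, num_layers=6,
  #                                   use_keras=False, use_depthwise=False):
  #       return my_ssd_extractor.MyFeatureExtractor(depth_multiplier,
  #                                                  pad_to_multiple)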
def _create_features(self,
depth_multiplier,
pad_to_multiple,
use_explicit_padding=False,
use_depthwise=False,
num_layers=6,
use_keras=False):
kwargs = {}
if use_explicit_padding:
kwargs.update({'use_explicit_padding': use_explicit_padding})
if use_depthwise:
kwargs.update({'use_depthwise': use_depthwise})
if num_layers != 6:
kwargs.update({'num_layers': num_layers})
if use_keras:
kwargs.update({'use_keras': use_keras})
feature_extractor = self._create_feature_extractor(
depth_multiplier,
pad_to_multiple,
**kwargs)
return feature_extractor
def _extract_features(self,
image_tensor,
feature_extractor,
use_keras=False):
if use_keras:
feature_maps = feature_extractor(image_tensor)
else:
feature_maps = feature_extractor.extract_features(image_tensor)
return feature_maps
def check_extract_features_returns_correct_shape(self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_tensor):
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
image_tensor = np.random.rand(batch_size, image_height, image_width,
3).astype(np.float32)
feature_maps = self.execute(graph_fn, [image_tensor], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_returns_correct_shapes_with_dynamic_inputs(
self,
batch_size,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
expected_feature_map_shapes,
use_explicit_padding=False,
num_layers=6,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_explicit_padding=use_explicit_padding,
num_layers=num_layers,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn(image_height, image_width):
image_tensor = tf.random_uniform([batch_size, image_height, image_width,
3], dtype=tf.float32)
return self._extract_features(
image_tensor,
feature_extractor,
use_keras=use_keras)
feature_maps = self.execute_cpu(graph_fn, [
np.array(image_height, dtype=np.int32),
np.array(image_width, dtype=np.int32)
], graph=g)
for feature_map, expected_shape in zip(
feature_maps, expected_feature_map_shapes):
self.assertAllEqual(feature_map.shape, expected_shape)
def check_extract_features_raises_error_with_invalid_image_size(
self,
image_height,
image_width,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
with test_utils.GraphContextOrNone() as g:
batch = 4
width = tf.random.uniform([], minval=image_width, maxval=image_width+1,
dtype=tf.int32)
height = tf.random.uniform([], minval=image_height, maxval=image_height+1,
dtype=tf.int32)
shape = tf.stack([batch, height, width, 3])
preprocessed_inputs = tf.random.uniform(shape)
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
def graph_fn():
feature_maps = self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return feature_maps
if self.is_tf2():
with self.assertRaises(ValueError):
self.execute_cpu(graph_fn, [], graph=g)
else:
with self.assertRaises(tf.errors.InvalidArgumentError):
self.execute_cpu(graph_fn, [], graph=g)
def check_feature_extractor_variables_under_scope(self,
depth_multiplier,
pad_to_multiple,
scope_name,
use_keras=False,
use_depthwise=False):
variables = self.get_feature_extractor_variables(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
for variable in variables:
self.assertTrue(variable.name.startswith(scope_name))
def get_feature_extractor_variables(self,
depth_multiplier,
pad_to_multiple,
use_keras=False,
use_depthwise=False):
g = tf.Graph()
with g.as_default():
feature_extractor = self._create_features(
depth_multiplier,
pad_to_multiple,
use_keras=use_keras,
use_depthwise=use_depthwise)
preprocessed_inputs = tf.placeholder(tf.float32, (4, None, None, 3))
self._extract_features(
preprocessed_inputs,
feature_extractor,
use_keras=use_keras)
return g.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
| [
"[email protected]"
] | |
a19341832df5aa7bd0970ac6ef6b9c9a7279c21a | 73b5d880fa06943c20ff0a9aee9d0c1d1eeebe10 | /tinyos-1.x/contrib/ucb/apps/LandmarkRouting/lossy.py | 404b3df55a95a17dbacc58e49ca3b896c54ce7b8 | [
"Intel"
] | permissive | x3ro/tinyos-legacy | 101d19f9e639f5a9d59d3edd4ed04b1f53221e63 | cdc0e7ba1cac505fcace33b974b2e0aca1ccc56a | refs/heads/master | 2021-01-16T19:20:21.744228 | 2015-06-30T20:23:05 | 2015-06-30T20:23:05 | 38,358,728 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 781 | py | from simcore import *
if not sim.__driver.pluginManager.getPlugin("RadioModelGuiPlugin").isRegistered():
print "Please create radio model first using the Radio Model Plugin."
else:
pf = open('packet','w')
space = ' '
end = ' 0.0 0.0\n'
for i in motes:
for j in motes:
s = str(i.getID()) + space + str(j.getID()) + space
if i.getID() == j.getID():
continue
elif i.getID() == 1 or i.getID() == 0:
continue
elif j.getID() == 1 or j.getID() == 0:
continue
elif radio.getLossRate(i.getID(), j.getID()) < 1.0:
s += str(radio.getLossRate(i.getID(),j.getID())) + end
pf.write(s)
pf.flush()
pf.close()
| [
"[email protected]"
] | |
fc49994cbf7356c6fd241ebfa3d48ca03c7d5983 | f0a5ad7b8aa39f51f233391fead0da3eabecc4ee | /.history/toolbox/tradaExtract_20191128085816.py | a1f0a049b0449f364b7c3a9c579677dbaf4a3ae4 | [] | no_license | OseiasBeu/webScrapping | e0a524847e55b24dbbd3d57bbe7fa43b4e101f48 | 1e72c7551aea355a891043baecfcbab8a89e719a | refs/heads/master | 2022-10-25T18:12:50.858653 | 2020-06-18T01:29:24 | 2020-06-18T01:29:24 | 224,681,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from bs4 import BeautifulSoup
arr = [['#', 'clienteEstado', 'warehouseId', 'Pendentes', 'de', 'integrao'], ['1', 'SP', '2404', '48'], ['2', 'SP', '2462', '10'], ['3', 'SP', '7100', '7'], ['4', 'MG', 'BR19_A002', '6'], ['5', 'SP', 'BR19_A002', '6'], ['6', 'PE', 'BR19_A002', '5'], ['7', 'SP', '2444', '3'], ['8', 'MG', '7100', '2'], ['9', 'RJ', 'BR19_A002', '2'], ['10', 'BA', 'BR19_A002', '2'], ['11', 'MG', '0', '1'], ['12', 'SP', '7134', '1'], ['13', 'SP', '7136', '1'], ['14', 'SP', 'BR1F_A002', '1']]
# BeautifulSoup parses markup text, not a Python list, so serialize the
# table first (the original passed the list directly, which raises).
soup = BeautifulSoup(str(arr), "html.parser").encode("utf-8")
print(arr) | [
"[email protected]"
] | |
6171b8e1aaffc27ebb5b2e594409e8ce47552e37 | ae9d32213e4ab423965e4a7f3ba1e6abfea85817 | /PreplotCalculator.py | 93599d5d0fedb8bd01a8babfdb6fcdffc49ae537 | [] | no_license | syntaxnoob/SpawnerDistance | 9e8d68123a8eb6835cff33f991b12bb153fb0858 | a07767d5e9358bb2b1efde171ee4a5c297302933 | refs/heads/master | 2022-07-31T08:24:35.172896 | 2020-05-23T16:22:09 | 2020-05-23T16:22:09 | 263,573,361 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | #!/bin/python3
import math
import pandas as pd
### Variabel ###
# spawner coordinates (Xcoordinate, Ycoordinate, Zcoordinate)
Spawners = [(370, 28, 886), (365, 37, 945), (359, 39, 917), (381, 42, 917),
(351, 44, 931), (362, 44, 891), (408, 44, 927), (429, 35, 897)]
Bigsum = 0
Distancelist = [] # List with Blockindex and Distances
Blocklist = [] # List with Blockindex and X/Y/Z coordinates
Sumlist = [] # List with Distances
Blockindex = -3 # Blockindex is the index for the searched block
maxdistance = 16 # Max distance from player to spawner
Xcoords = []
Ycoords = []
Zcoords = []
bestlist = [] # List of blockindexes
goedblok = [] # List of bestlist blocks
### Find Search area ###
for d in Spawners:
Xcoords.append(d[0])
Ycoords.append(d[1])
Zcoords.append(d[2])
Xcoords.sort()
Ycoords.sort()
Zcoords.sort()
minX = Xcoords[0]
minY = Ycoords[0]
minZ = Zcoords[0]
maxX = Xcoords[-1]
maxY = Ycoords[-1]
maxZ = Zcoords[-1]
# Could be optimized
### Brute force the shortest distance ###
for i in range(minX, maxX): # Xcoords Loop
Blockindex = Blockindex + 1
for j in range(minY, maxY): # Ycoords Loop
Blockindex = Blockindex + 1
for k in range(minZ, maxZ): # Zcoords Loop
Blockindex = Blockindex + 1
            for l in range(len(Spawners)):  # the original range(0, 7) skipped the 8th spawner
                # Pythagorean distance from block (i, j, k) to spawner l.
                distance = math.sqrt(
                    math.pow((Spawners[l][0] - i), 2) + math.pow((Spawners[l][1] - j), 2) + math.pow((Spawners[l][2] - k), 2))
                if (distance > maxdistance):
                    # Each out-of-range spawner adds a 1,000,000 penalty, so the
                    # activated-spawner count can be recovered from the total later.
                    Bigsum = 1000000 + Bigsum
                else:  # distance is always positive
                    Bigsum = distance + Bigsum
Distancelist.append(Blockindex)
Distancelist.append(Bigsum)
Sumlist.append(Bigsum)
Blocklist.append(Blockindex)
Blocklist.append(i)
Blocklist.append(j)
Blocklist.append(k)
Bigsum = 0
Blockindex = Blockindex - 1
Blockindex = Blockindex - 1
Sumlist.sort()
print(Sumlist[0])
ID = Distancelist.index(Sumlist[0])  # position of the lowest sum in Distancelist
bestblock = Distancelist[ID - 1]  # the block index stored just before that sum
DI = bestblock * 4  # each block occupies 4 slots in Blocklist
print("The block that is closest to all spawners is:", Blocklist[DI + 1], ",",
      Blocklist[DI + 2], ",", Blocklist[DI + 3], ".", "And you activate:",
      round((len(Spawners) * 1000000 - Distancelist[ID]) / 1000000), "Spawners.")
for i in range(len(Distancelist)):
    # Distancelist alternates [block index, sum, ...]; block indexes stay far
    # below 1,000,000 here, so only the sums can pass this threshold.
    if (Distancelist[i] > 1000000):  # at least one spawner out of range
        if (Distancelist[i] < 5000000):  # but fewer than five out of range
            bestlist.append(Distancelist[(i - 1)])  # keep the preceding block index
        else:
            continue
    else:
        continue
### Bestlist is correct, do not touch ###
for v in range(len(bestlist)):
    base = bestlist[v] * 4  # block v occupies 4 slots in Blocklist, so index directly
    goedblok.append(Blocklist[base + 1:base + 4])
print("blocks that activate at least 3 spawners:", len(bestlist))
pd.DataFrame(goedblok).to_csv("3spawner.csv", index=False)
| [
"[email protected]"
] | |
3b497b13bfb03c08d8605c64566caeff353afe1f | a1aadb13c35f2a3fb27078090e5a582a3ea462f1 | /devel/py-pyobjc-core/patches/patch-setup.py | f046aa0efda0c7712c4171148edac369e6c807f7 | [] | no_license | fidelix-project/pkgsrc | 702346ca3a74b3dced9de29b07d342154466d1bd | 8a6673aa3e19b8604d2077015dc4673304399afc | refs/heads/master | 2022-11-06T04:48:33.983672 | 2020-06-28T14:06:28 | 2020-06-28T14:06:28 | 273,759,036 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | $NetBSD: patch-setup.py,v 1.1 2019/11/18 17:05:06 adam Exp $
Do not add debug symbols.
Do not override compiler optimization flags.
--- setup.py.orig 2019-11-18 16:02:47.000000000 +0000
+++ setup.py
@@ -66,7 +66,6 @@ def get_sdk_level(sdk):
# CFLAGS for the objc._objc extension:
CFLAGS = [
- "-g",
"-fexceptions",
# Loads of warning flags
"-Wall",
@@ -137,7 +136,7 @@ if get_config_var("Py_DEBUG"):
elif isinstance(cfg_vars[k], str) and "-O3" in cfg_vars[k]:
cfg_vars[k] = cfg_vars[k].replace("-O3", "-O1 -g")
-else:
+elif False:
# Enable -O4, which enables link-time optimization with
# clang. This appears to have a positive effect on performance.
cfg_vars = get_config_vars()
| [
"[email protected]"
] | |
ca46bb856d561d725345a0a14058c5877a4cac0e | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/d4rl/test1.py | 99515aca2a2dba3519cd10dc424cb31a7cf4af19 | [
"MIT"
] | permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,000 | py | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.hdf5_path_loader import HDF5PathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
if __name__ == "__main__":
variant = dict(
num_epochs=101,
num_eval_steps_per_epoch=1000,
num_trains_per_train_loop=1000,
num_expl_steps_per_train_loop=1000,
min_num_steps_before_training=1000,
max_path_length=1000,
batch_size=1024,
replay_buffer_size=int(2E6),
layer_size=256,
policy_class=GaussianPolicy,
policy_kwargs=dict(
hidden_sizes=[256, 256, 256, 256],
max_log_std=0,
min_log_std=-6,
std_architecture="values",
# num_gaussians=1,
),
qf_kwargs=dict(
hidden_sizes=[256, 256, ],
),
algorithm="SAC",
version="normal",
collection_mode='batch',
trainer_kwargs=dict(
discount=0.99,
soft_target_tau=5e-3,
target_update_period=1,
policy_lr=3E-4,
qf_lr=3E-4,
reward_scale=1,
beta=1,
use_automatic_entropy_tuning=False,
alpha=0,
compute_bc=False,
bc_num_pretrain_steps=0,
q_num_pretrain1_steps=0,
q_num_pretrain2_steps=25000,
policy_weight_decay=1e-4,
q_weight_decay=0,
bc_loss_type="mse",
rl_weight=1.0,
use_awr_update=True,
use_reparam_update=False,
reparam_weight=0.0,
awr_weight=0.0,
bc_weight=1.0,
post_bc_pretrain_hyperparams=dict(
bc_weight=0.0,
compute_bc=False,
),
reward_transform_kwargs=None, # r' = r + 1
terminal_transform_kwargs=None, # t = 0
),
launcher_config=dict(
num_exps_per_instance=1,
region='us-west-2',
),
path_loader_class=HDF5PathLoader,
path_loader_kwargs=dict(),
add_env_demos=False,
add_env_offpolicy_data=False,
# logger_variant=dict(
# tensorboard=True,
# ),
load_demos=False,
load_env_dataset_demos=True,
pretrain_policy=True,
pretrain_rl=True,
# save_pretrained_algorithm=True,
# snapshot_mode="all",
)
search_space = {
# 'env': ["pen-sparse-v0", "door-sparse-v0"],
'env': ["halfcheetah-mixed-v0", "walker2d-mixed-v0", "hopper-mixed-v0", ],
'trainer_kwargs.bc_loss_type': ["mle"],
'trainer_kwargs.awr_loss_type': ["mle"],
'seedid': range(3),
'trainer_kwargs.beta': [0.1, 0.3, 1.0, ],
'trainer_kwargs.reparam_weight': [0.0, ],
'trainer_kwargs.awr_weight': [1.0],
'trainer_kwargs.bc_weight': [1.0, ],
'policy_kwargs.std_architecture': ["values", ],
# 'trainer_kwargs.clip_score': [0.5, ],
# 'trainer_kwargs.compute_bc': [True, ],
'trainer_kwargs.awr_use_mle_for_vf': [True, ],
'trainer_kwargs.awr_sample_actions': [False, ],
'trainer_kwargs.awr_min_q': [True, ],
'trainer_kwargs.q_weight_decay': [0, ],
'trainer_kwargs.reward_transform_kwargs': [None, ],
'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
# 'qf_kwargs.output_activation': [Clamp(max=0)],
# 'trainer_kwargs.train_bc_on_rl_buffer':[True],
# 'policy_kwargs.num_gaussians': [1, ],
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(experiment, variants, process_args)
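    # The sweeper expands search_space as a cartesian product, so this launches
    # 3 envs x 3 seeds x 3 betas = 27 variants (the remaining lists are singletons).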
| [
"[email protected]"
] | |
5022b105c714e2dc4421650a004f69e753e7f87b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_unbarring.py | 324d198051173b711ebc3f517ecffc2d0ffdcc48 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py |
# class header
class _UNBARRING():
    def __init__(self,):
        self.name = "UNBARRING"
        # the generated source assigned the bare, undefined name `unbar`;
        # it is quoted here so the module imports cleanly
        self.definitions = ['unbar']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['unbar']
| [
"[email protected]"
] | |
f161dfd93a9ee6f9e40d4a3e791a7fd91f35b6f9 | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_12261.py | 6048a7e7b0d30415153952be2d88ff3f533b7b1a | [] | no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | # weird django file upload error
enctype="multipart/form-data"
| [
"[email protected]"
] | |
2520af0b5128fb372cc2fef73350890249d44869 | 5396a46275e52bfc972f05097e925742d5bbf2d1 | /_2016/eola/thumbnails.py | 9bc1e91212c3f71dae4f75496806adaf7814e43c | [
"MIT"
] | permissive | 3b1b/videos | 6ab0e4fe0fb07d15b5455f8726131a880437c42c | e841b1410fdda2d3bddb7cfa12ce070a3b66a026 | refs/heads/master | 2023-08-29T01:37:23.424512 | 2023-08-16T03:35:03 | 2023-08-16T03:35:03 | 325,873,493 | 4,601 | 1,868 | null | 2023-03-30T08:15:37 | 2020-12-31T21:07:33 | Python | UTF-8 | Python | false | false | 4,029 | py | from manim_imports_ext import *
from _2016.eola.chapter9 import Jennifer, You
class Chapter0(LinearTransformationScene):
CONFIG = {
"include_background_plane" : False,
"t_matrix" : [[3, 1], [2, -1]]
}
def construct(self):
self.setup()
self.plane.fade()
for mob in self.get_mobjects():
mob.set_stroke(width = 6)
self.apply_transposed_matrix(self.t_matrix, run_time = 0)
class Chapter1(Scene):
def construct(self):
arrow = Vector(2*UP+RIGHT)
vs = OldTexText("vs.")
array = Matrix([1, 2])
array.set_color(TEAL)
everyone = VMobject(arrow, vs, array)
everyone.arrange(RIGHT, buff = 0.5)
everyone.set_height(4)
self.add(everyone)
class Chapter2(LinearTransformationScene):
def construct(self):
self.lock_in_faded_grid()
vectors = VMobject(*[
Vector([x, y])
for x in np.arange(-int(FRAME_X_RADIUS)+0.5, int(FRAME_X_RADIUS)+0.5)
for y in np.arange(-int(FRAME_Y_RADIUS)+0.5, int(FRAME_Y_RADIUS)+0.5)
])
vectors.set_submobject_colors_by_gradient(PINK, BLUE_E)
words = OldTexText("Span")
words.scale(3)
words.to_edge(UP)
words.add_background_rectangle()
self.add(vectors, words)
class Chapter3(Chapter0):
CONFIG = {
"t_matrix" : [[3, 0], [2, -1]]
}
class Chapter4p1(Chapter0):
CONFIG = {
"t_matrix" : [[1, 0], [1, 1]]
}
class Chapter4p2(Chapter0):
CONFIG = {
"t_matrix" : [[1, 2], [-1, 1]]
}
class Chapter5(LinearTransformationScene):
def construct(self):
self.plane.fade()
self.add_unit_square()
self.plane.set_stroke(width = 6)
VMobject(self.i_hat, self.j_hat).set_stroke(width = 10)
self.square.set_fill(YELLOW, opacity = 0.7)
self.square.set_stroke(width = 0)
self.apply_transposed_matrix(self.t_matrix, run_time = 0)
class Chapter9(Scene):
def construct(self):
you = You()
jenny = Jennifer()
you.change_mode("erm")
jenny.change_mode("speaking")
you.shift(LEFT)
jenny.shift(2*RIGHT)
vector = Vector([3, 2])
vector.center().shift(2*DOWN)
vector.set_stroke(width = 8)
vector.tip.scale(2)
you.coords = Matrix([3, 2])
jenny.coords = Matrix(["5/3", "1/3"])
for pi in jenny, you:
pi.bubble = pi.get_bubble(SpeechBubble, width = 3, height = 3)
if pi is you:
pi.bubble.shift(MED_SMALL_BUFF*RIGHT)
else:
pi.coords.scale(0.8)
pi.bubble.shift(MED_SMALL_BUFF*LEFT)
pi.bubble.add_content(pi.coords)
pi.add(pi.bubble, pi.coords)
pi.look_at(vector)
self.add(you, jenny, vector)
class Chapter10(LinearTransformationScene):
CONFIG = {
"foreground_plane_kwargs" : {
"x_radius" : FRAME_WIDTH,
"y_radius" : FRAME_HEIGHT,
"secondary_line_ratio" : 1
},
"include_background_plane" : False,
}
def construct(self):
v_tex = "\\vec{\\textbf{v}}"
eq = OldTex("A", v_tex, "=", "\\lambda", v_tex)
eq.set_color_by_tex(v_tex, YELLOW)
eq.set_color_by_tex("\\lambda", MAROON_B)
eq.scale(3)
eq.add_background_rectangle()
eq.shift(2*DOWN)
title = OldTexText(
"Eigen", "vectors \\\\",
"Eigen", "values"
, arg_separator = "")
title.scale(2.5)
title.to_edge(UP)
# title.set_color_by_tex("Eigen", MAROON_B)
title[0].set_color(YELLOW)
title[2].set_color(MAROON_B)
title.add_background_rectangle()
self.add_vector([-1, 1], color = YELLOW, animate = False)
self.apply_transposed_matrix([[3, 0], [1, 2]])
self.plane.fade()
self.remove(self.j_hat)
self.add(eq, title)
| [
"[email protected]"
] | |
ccf55017fbc1f4207985eaa80f14722daf999f3e | 269feb0a04e10df899b7cf0d37c42fd295fd6ac0 | /5_三角形斜边长.2.py | 5c697de2b506b4189fc2744884b4f1700bf259d0 | [] | no_license | zhangxingxing12138/card | c0134951ded50b7cb8c129c28e07252f35796052 | 793de5c5546143b59f8fd169a4e0c2cea1a5b416 | refs/heads/master | 2020-03-23T11:45:29.070458 | 2018-10-16T00:15:01 | 2018-10-16T00:15:01 | 141,519,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | a=input("直角边a:")
b=input("直角边b:")
A=float(a)
B=float(b)
c=(A*A+B*B)**(1/2)
print(c)
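# Example: a = 3, b = 4 gives c = 5.0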
| [
"[email protected]"
] | |
5cc0139aa5321db4c991af5ca4902a1878f8d7f1 | ec1deb682fb96a1f937f2fca5f161aa951462876 | /unittestPython/part_1/name_function.py | 61209de86dc7aec85c8f1a819784981abebebc0c | [] | no_license | AnatoliKosarev/Python-beginner-course--Teclado- | 31d82f5e9a1f39e2970323bed9de1fd539990565 | fa91199938d6975b5874341585343566caaf3600 | refs/heads/main | 2023-06-30T12:14:33.779827 | 2021-07-24T11:16:19 | 2021-07-24T11:16:19 | 376,371,590 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def get_formatted_name(first, last, middle=""): # middle name is optional
if middle:
full_name = f"{first} {middle} {last}"
else:
full_name = f"{first} {last}"
return full_name.title()
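

if __name__ == "__main__":
    # Usage sketch (illustrative names, not from the book's test module):
    print(get_formatted_name("janis", "joplin"))  # Janis Joplin
    print(get_formatted_name("wolfgang", "mozart", "amadeus"))  # Wolfgang Amadeus Mozart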
| [
"[email protected]"
] | |
db09f5e6aeb8defe8a7c9c365689f0ee46b07dc4 | 2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac | /Dsz/PyScripts/Lib/dsz/mca/network/cmd/banner/errors.py | 3dffd24a2b423eab69b50b74ee3889931f22a361 | [] | no_license | FingerLeakers/DanderSpritz_docs | f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364 | d96b6a71c039b329f9f81544f645857c75360e7f | refs/heads/master | 2021-01-25T13:05:51.732149 | 2018-03-08T01:22:49 | 2018-03-08T01:22:49 | 123,527,268 | 2 | 0 | null | 2018-03-02T03:48:31 | 2018-03-02T03:48:30 | null | UTF-8 | Python | false | false | 1,606 | py | # uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: errors.py
import mcl.status
ERR_SUCCESS = mcl.status.MCL_SUCCESS
ERR_INVALID_PARAM = mcl.status.framework.ERR_START
ERR_CALLBACK_FAILED = mcl.status.framework.ERR_START + 1
ERR_MARSHAL_FAILED = mcl.status.framework.ERR_START + 2
ERR_SOCKET_INIT_FAILURE = mcl.status.framework.ERR_START + 3
ERR_SOCKET_BIND_FAILURE = mcl.status.framework.ERR_START + 4
ERR_SOCKET_OPTION_FAILURE = mcl.status.framework.ERR_START + 5
ERR_CONNECT_FAILURE = mcl.status.framework.ERR_START + 6
ERR_SEND_FAILURE = mcl.status.framework.ERR_START + 7
ERR_PACKET_TOO_LARGE = mcl.status.framework.ERR_START + 8
ERR_RECV_ERROR = mcl.status.framework.ERR_START + 9
ERR_RECV_TIMEOUT = mcl.status.framework.ERR_START + 10
ERR_NOT_IMPLEMENTED = mcl.status.framework.ERR_START + 11
errorStrings = {ERR_INVALID_PARAM: 'Invalid parameter(s)',
ERR_CALLBACK_FAILED: 'Error making callback',
ERR_MARSHAL_FAILED: 'Marshaling data failed',
ERR_SOCKET_INIT_FAILURE: 'Socket initialization failed',
ERR_SOCKET_BIND_FAILURE: 'Failed to bind to given source port',
ERR_SOCKET_OPTION_FAILURE: 'Failed to set socket option',
ERR_CONNECT_FAILURE: 'Connect request failed',
ERR_SEND_FAILURE: 'Send failed',
ERR_PACKET_TOO_LARGE: 'The given packet is too large to send',
ERR_RECV_ERROR: 'Error receiving data',
ERR_RECV_TIMEOUT: 'Timeout waiting for data',
ERR_NOT_IMPLEMENTED: 'Not implemented on this platform'
} | [
"[email protected]"
] | |
447fc54eea01a339401254a7ab9eea6548c5d5d1 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/intentions/PyInvertIfConditionIntentionTest/generalNoElseTry.py | 8071d065e802d90e83cc718813bbe0e7adcdde7c | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 207 | py | def func():
value = "not-none"
<caret>if value is None:
print("None")
return
try:
return int(value)
except ValueError:
raise RuntimeError("Value is not int") | [
"[email protected]"
] | |
1fb7e441f0b01c1a959827aa4ff80c6bf4ced77a | a24cedf9dea47ba64fbf779a8c18f39bd9a196cf | /halo_roller/urls.py | a81861188bce43e27eb6deda9e1867e535029ead | [] | no_license | rkuykendall/halo-roller | e795d19e0d1beef8a91a9bf417ce2c4908b1666b | 73b5ec77fc4070c4bf9694ffe99497ab3abb39f7 | refs/heads/master | 2022-12-18T16:10:35.798073 | 2020-09-23T22:12:01 | 2020-09-23T22:12:01 | 297,977,249 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | """halo_roller URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('games.urls')),
]
| [
"[email protected]"
] | |
971d6c7a8b93db04103d5493b66aab379de626ae | 2794764ddbe9daf666601014cb84e5ca7b6ca7c3 | /Account/urls.py | d1d10c86cebf2fd2a839bfcf8f84f540ce97c97e | [] | no_license | aydanaderi/goldoon | 5b7341f1b94cb607bcc7b895fe22a6affb817cd7 | 3f4cc6a526eae70f55833d0b07d5209b243aff20 | refs/heads/main | 2023-01-19T16:12:22.837854 | 2020-11-26T15:46:24 | 2020-11-26T15:46:24 | 311,077,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 638 | py | from django.urls import path
from knox import views as knox_views
from . import views
urlpatterns = [
path('signup/', views.RegisterAPI.as_view(), name = 'register'),
path('login/', views.LoginAPI.as_view(), name = 'login'),
path('logout/', knox_views.LogoutView.as_view(), name = 'logout'),
path('change_password/', views.ChangePasswordView.as_view(), name = 'change-password'),
path('reset/', views.ResetPasswodView, name = 'Reset_Password'),
path('<int:username_id>/reset/confirm/', views.ConfirmResetPasswodView , name = 'confirm_Reset_password'),
path('profile/', views.ProfileView, name = 'profile'),
]
| [
"[email protected]"
] | |
d2abb6f2ca0db30eff1b7c9cd045858a1b1837b6 | 46adba1fe06298743f018abd0096c753256ac03a | /src/procgraph_vehicles/cairo_map_display.py | d05071495667e74265c75eb29020abf49801f486 | [] | no_license | ajaycharan/vehicles | 9e0f21a2550c56e83303329c1bdf8c40bde5c0eb | 2cfe467422160f90bc76800216ac42c0f13f2c4d | refs/heads/master | 2021-05-28T21:03:35.466743 | 2013-07-19T06:59:51 | 2013-07-19T06:59:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,686 | py | from cairo_utils import cairo_pixels, cairo_text_align
from geometry import SE2_from_SE3, SE3
from procgraph import BadConfig, Block
from procgraph.block_utils import make_sure_dir_exists
from procgraph_images import posneg, scale, reshape2d
from vehicles_cairo import (cairo_save, cairo_transform,
vehicles_cairo_display_all, cairo_rototranslate, cairo_ref_frame)
import numpy as np
import os
import subprocess
class VehiclesCairoDisplay(Block):
''' Produces a top-down plot of a circular arena. '''
Block.alias('vehicles_cairo_display')
Block.config('format', 'pdf|png', default='pdf')
Block.config('file', 'Output file (pdf)', default=None)
Block.output('rgb', 'RGB data (png)')
Block.config('transparent', 'Outputs RGB with transparent bg',
default=False)
Block.config('width', 'Image width in points.', default=600)
Block.config('height', 'Image height in points.', default=600)
Block.config('sidebar_width', default=200)
# Sidebar options
Block.config('display_sidebar', default=True)
Block.config('trace', 'Trace the path', default=False)
Block.config('plotting_params',
'Configuration to pass to vehicles_cairo_display_all()',
default={})
Block.config('sidebar_params',
'Configuration to pass to create_sidebar()',
default={})
Block.config('swf', 'Converts PDF to SWF using pdf2swf', default=True)
Block.input('boot_obs', '')
def get_shape(self):
w = self.config.width
if self.config.display_sidebar:
w += self.config.sidebar_width
h = self.config.height
return (w, h)
def init(self):
self.format = self.config.format
(w, h) = self.get_shape()
self.total_width = w
self.total_height = h
self.frame = 0
if self.format == 'pdf':
self.init_pdf()
elif self.format == 'png':
self.init_png()
else:
raise BadConfig('Invalid format %r.' % self.format, self, 'format')
self.count = 0
self.fps = None
self.t0 = None
self.tmp_cr = None
def init_pdf(self):
self.filename = self.config.file
self.tmp_filename = self.filename + '.active'
make_sure_dir_exists(self.filename)
self.info("Creating file %r." % self.filename)
import cairo
self.surf = cairo.PDFSurface(self.tmp_filename, # @UndefinedVariable
self.total_width,
self.total_height)
def init_png(self):
import cairo
w, h = self.total_width, self.total_height
        # note: the surface is created as (w, h) while the numpy buffer is (h, w, 4) with stride w * 4
self.argb_data = np.empty((h, w, 4), dtype=np.uint8)
self.argb_data.fill(255)
self.surf = cairo.ImageSurface.create_for_data(# @UndefinedVariable
self.argb_data,
cairo.FORMAT_ARGB32, # @UndefinedVariable
w, h, w * 4)
def update(self):
# Estimate fps
if self.count == 0:
self.t0 = self.get_input_timestamp(0)
if self.count >= 1:
delta = self.get_input_timestamp(0) - self.t0
self.fps = 1.0 * self.count / delta
self.count += 1
if self.format == 'pdf':
self.update_pdf()
elif self.format == 'png':
self.update_png()
else:
assert False
def update_png(self):
import cairo
cr = cairo.Context(self.surf) # @UndefinedVariable
self.draw_everything(cr)
self.surf.flush()
if not self.config.transparent:
rgb = self.argb_data[:, :, :3].copy()
# fix red/blue inversion
rgb[:, :, 0] = self.argb_data[:, :, 2]
rgb[:, :, 2] = self.argb_data[:, :, 0]
assert rgb.shape[2] == 3
else:
rgb = self.argb_data.copy()
# fix red/blue inversion
rgb[:, :, 0] = self.argb_data[:, :, 2]
rgb[:, :, 2] = self.argb_data[:, :, 0]
assert rgb.shape[2] == 4
self.output.rgb = rgb
def update_pdf(self):
import cairo
# If I don't recreate it, it will crash
cr = cairo.Context(self.surf) # @UndefinedVariable
if not self.config.transparent:
# Set white background
bg_color = [1, 1, 1]
cr.rectangle(0, 0, self.total_width, self.total_height)
cr.set_source_rgb(bg_color[0], bg_color[1], bg_color[2])
cr.fill()
else:
# Green screen :-)
cr.rectangle(0, 0, self.total_width, self.total_height)
cr.set_source_rgba(0, 1, 0, 0)
cr.fill()
self.draw_everything(cr)
self.surf.flush()
self.surf.show_page() # Free memory self.cr?
def draw_everything(self, cr):
boot_obs = self.input.boot_obs
if 'id_episode' in boot_obs:
id_episode = boot_obs['id_episode'].item()
else:
id_episode = ''
id_vehicle = boot_obs['id_robot'].item()
if 'extra' in boot_obs:
extra = boot_obs['extra'].item()
else:
extra = {}
def extra_draw_world(cr):
if 'servonav' in extra:
plot_servonave(cr, extra['servonav'])
if 'servoing_poses' in extra:
plot_servoing_poses(cr, extra['servoing_poses'])
plotting_params = self.config.plotting_params
plotting_params['extra_draw_world'] = extra_draw_world
sidebar_params = self.config.sidebar_params
# todo: check
sim_state = extra['robot_state']
observations_values = boot_obs['observations']
commands = boot_obs['commands']
commands_source = boot_obs['commands_source'].item()
timestamp = boot_obs['time_from_episode_start'].item()
with cairo_save(cr):
if self.config.display_sidebar:
padding = 0.03 * self.config.width
map_width = self.config.width - 2 * padding
map_height = self.config.height - 2 * padding
cr.translate(padding, padding)
else:
map_width = self.config.width
map_height = self.config.height
with cairo_save(cr):
cr.rectangle(0, 0, map_width, map_height)
cr.clip()
# TODO: implement trace
vehicles_cairo_display_all(cr,
map_width,
map_height,
sim_state,
**plotting_params)
if self.config.display_sidebar:
cr.set_line_width(1)
cr.set_source_rgb(0, 0, 0)
cr.rectangle(0, 0, map_width, map_height)
cr.stroke()
if self.config.display_sidebar:
with cairo_transform(cr, t=[self.config.width, 0]):
create_sidebar(cr, width=self.config.sidebar_width,
height=self.config.height,
sim_state=sim_state,
id_vehicle=id_vehicle,
id_episode=id_episode,
timestamp=timestamp,
observations_values=observations_values,
commands_values=commands,
commands_source=commands_source,
**sidebar_params)
def finish(self):
if self.format == 'pdf':
self.finish_pdf()
def finish_pdf(self):
self.surf.finish()
if os.path.exists(self.filename):
os.unlink(self.filename)
if os.path.exists(self.tmp_filename):
os.rename(self.tmp_filename, self.filename)
if self.config.swf:
if self.fps is None:
self.error('Only one frame seen?')
else:
basename, _ = os.path.splitext(self.filename)
swf = '%s.swf' % basename
try:
command = ['pdf2swf',
# "-b", # --defaultviewer
# "-l", # --defaultloader
'-G', # flatten
'-s', 'framerate=%d' % self.fps,
self.filename,
'-o', swf]
self.info(' '.join(command))
subprocess.check_call(command)
except Exception as e:
self.error('Could not convert to swf: %s' % e)
if os.path.exists(swf):
os.unlink(swf)
self.info("Completed %r." % self.filename)
class VehiclesDisplay(VehiclesCairoDisplay):
''' Produces a top-down plot of a circular arena. '''
Block.alias('vehicles_cairo_display_all')
Block.config('format', 'pdf|png', default='pdf')
Block.config('file', 'Output file (pdf)', default=None)
Block.output('rgb', 'RGB data (png)')
Block.config('transparent', 'Outputs RGB with transparent bg',
default=False)
Block.config('width', 'Image width in points.', default=600)
Block.config('height', 'Image height in points.', default=600)
Block.config('trace', 'Trace the path', default=False)
Block.config('plotting_params',
'Configuration to pass to vehicles_cairo_display_all()',
default={})
Block.config('swf', 'Converts PDF to SWF using pdf2swf', default=True)
Block.input('boot_obs')
def get_shape(self):
w = self.config.width
h = self.config.height
return (w, h)
def draw_everything(self, cr):
sim_state = self.input.boot_obs
map_width = self.config.width
map_height = self.config.height
plotting_params = self.config.plotting_params
with cairo_save(cr):
cr.rectangle(0, 0, map_width, map_height)
cr.clip()
# TODO: implement trace
vehicles_cairo_display_all(cr,
map_width,
map_height,
sim_state,
**plotting_params)
def create_sidebar(cr, width, height, sim_state, id_vehicle, id_episode, # @UnusedVariable
timestamp, observations_values,
commands_values, commands_source,
bg_color=None,
show_observations=True,
show_commands=True,
show_annotations=True):
if len(commands_values.shape) == 1:
commands_values = np.array([commands_values.tolist()])
commands_rgb = posneg(commands_values,
max_value=(+1), # not sure +1
nan_color=[1, 1, 1])
observations_rgb = scale(reshape2d(observations_values), min_value=0,
nan_color=[1, 1, 1])
import cairo
if bg_color is not None:
cr.rectangle(0, 0, width, height)
cr.set_source_rgb(bg_color[0], bg_color[1], bg_color[2])
cr.fill()
fo = cairo.FontOptions() # @UndefinedVariable
fo.set_hint_style(cairo.HINT_STYLE_FULL) # @UndefinedVariable
fo.set_antialias(cairo.ANTIALIAS_GRAY) # @UndefinedVariable
cr.set_font_options(fo)
# M = width / 20.0
M = width / 15.0
legend_font_size = M * 0.75
details_font_size = M
label_font = 'Mono'
legend_font = 'Serif'
cr.set_source_rgb(0, 0, 0)
padding_fraction = 0.1
padding = width * padding_fraction
nvalues = 128
bar_width = 0.4 * width
bar_ratio = 0.15
bar_height = bar_width * bar_ratio
spacer = 0.05 * width
values = np.linspace(-1, +1, nvalues)
values = np.vstack([values] * 1)
colorbar_posneg = posneg(values)
values = np.linspace(-1, +1, nvalues)
values = np.vstack([values] * 1)
colorbar_scale = scale(values)
cr.translate(0, 2 * M)
if show_observations:
with cairo_transform(cr, t=[width / 2, 0]):
cr.select_font_face(label_font)
cr.set_font_size(M)
cairo_text_align(cr, 'observations', halign='center')
cr.translate(0, M * 0.8)
with cairo_transform(cr, t=[padding, 0]):
data_width = width - 2 * padding
# Don't draw grid if there are many pixels
if max(observations_rgb.shape[0], observations_rgb.shape[1]) > 15:
grid_color = None
else:
grid_color = [1, .9, .9]
last_height = cairo_pixels(cr, observations_rgb, width=data_width,
# Force square
height=data_width,
grid_color=grid_color)
cr.translate(0, last_height)
cr.translate(0, spacer)
with cairo_transform(cr, t=[width / 2, 0]):
with cairo_transform(cr, t=[-bar_width / 2, 0]):
last_height = cairo_pixels(cr, colorbar_scale,
bar_width, height=bar_height,
grid_color=None)
cr.set_font_size(legend_font_size)
cr.select_font_face(legend_font)
with cairo_transform(cr, t=[0, bar_height / 2]):
with cairo_transform(cr, t=[-bar_width / 2 - M / 2, 0]):
cairo_text_align(cr, '0', 'right', 'middle')
with cairo_transform(cr, t=[+bar_width / 2 + M / 2, 0]):
cairo_text_align(cr, '1', 'left', 'middle')
cr.translate(0, last_height + spacer * 3)
if show_commands:
with cairo_transform(cr, t=[width / 2, 0]):
cr.select_font_face(label_font)
cr.set_font_size(M)
cairo_text_align(cr, 'commands', halign='center')
cr.translate(0, M * 0.8)
padding = padding * 2
with cairo_transform(cr, t=[padding, 0]):
data_width = width - 2 * padding
last_height = cairo_pixels(cr, commands_rgb, data_width)
cr.translate(0, last_height)
cr.translate(0, spacer)
with cairo_transform(cr, t=[width / 2, 0]):
with cairo_transform(cr, t=[-bar_width / 2, 0]):
last_height = cairo_pixels(cr, colorbar_posneg,
bar_width, height=bar_width * bar_ratio,
grid_color=None)
cr.set_font_size(legend_font_size)
cr.select_font_face(legend_font)
with cairo_transform(cr, t=[0, bar_height / 2]):
with cairo_transform(cr, t=[-bar_width / 2 - M / 2, 0]):
cairo_text_align(cr, '-1', 'right', 'middle')
with cairo_transform(cr, t=[+bar_width / 2 + M / 2, 0]):
cairo_text_align(cr, '+1', 'left', 'middle')
cr.translate(0, last_height + spacer * 2)
if show_annotations:
cr.translate(width / 10, 0)
strings = ['vehicle: %s' % id_vehicle,
' agent: %s' % commands_source,
'episode: %s' % id_episode,
' time: %6.2f' % timestamp,
]
cr.select_font_face('Mono')
max_len = max(len(x) for x in strings)
padding = 5
font_size = 1.6 * width / (max_len + padding)
cr.set_font_size(font_size)
line = details_font_size * 1.2
for s in strings:
with cairo_save(cr):
cr.show_text(s)
cr.stroke()
cr.translate(0, line)
def plot_servoing_poses(cr, servoing_poses):
# TODO
goal = SE3.from_yaml(servoing_poses['goal'])
with cairo_rototranslate(cr, goal):
cairo_ref_frame(cr, l=0.5)
def plot_servonave(cr, servonav):
locations = servonav['locations']
# current_goal = servonav['current_goal']
for _, loc in enumerate(locations):
pose = SE2_from_SE3(SE3.from_yaml(loc['pose']))
with cairo_rototranslate(cr, pose):
# if current_goal == i:
# cairo_ref_frame(cr, l=0.5)
# else:
grey = [.6, .6, .6]
cairo_ref_frame(cr, l=0.5, x_color=grey, y_color=grey)
| [
"[email protected]"
] | |
1c57bba12ea1d28e3d22c8f069be2ea6fb0a8d9d | aca4f00c884e1d0e6b2978512e4e08e52eebd6e9 | /2021/atcoder.jp/abc/196/prob.py | 561d92c060025a984e9491c8ceafd39586a1b707 | [] | no_license | jki14/competitive-programming | 2d28f1ac8c7de62e5e82105ae1eac2b62434e2a4 | ba80bee7827521520eb16a2d151fc0c3ca1f7454 | refs/heads/master | 2023-08-07T19:07:22.894480 | 2023-07-30T12:18:36 | 2023-07-30T12:18:36 | 166,743,930 | 2 | 0 | null | 2021-09-04T09:25:40 | 2019-01-21T03:40:47 | C++ | UTF-8 | Python | false | false | 400 | py | from math import floor
from sys import stdout
def solution(s):
p = s.find('.')
if p == -1:
p = len(s)
stdout.write('%d\n' % int(s[:p]))
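    # e.g. solution("12.35") writes "12"; solution("7") writes "7"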
def main():
while True:
try:
s = raw_input().strip()
solution(s)
except EOFError:
break
except ValueError:
continue
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
4e4b7278b5d85aced09f29bfe8d49d79fc5fb567 | c1ee8f22ece4fc39cb94fe19832fcba8e45cf5bc | /프로그래머스/문자열 내 마음대로 정렬하기.py | a45fa146443052022e2644fb242635aa218465d9 | [] | no_license | JeongHanJun/BOJ | ae6b1c64c5b3226deef2708ae447aa1225333a92 | a865624fb0a9291b68f99af8535f708554fa0b41 | refs/heads/master | 2023-03-31T02:22:58.974437 | 2021-04-02T02:43:57 | 2021-04-02T02:43:57 | 258,809,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | # 문자열 내 마음대로 정렬하기
# The problem title alone suggests sorted() with a key lambda.
def solution(strings, n):
answer = sorted(strings, key = lambda x : (x[n], x))
return answer
s1 = ["sun", "bed", "car"]
n1 = 1
s2 = ["abce", "abcd", "cdx"]
n2 = 2
print(solution(s1, n1))
print(solution(s2, n2)) | [
"[email protected]"
] | |
7faf21b3d81b85edbd984555f7dd773edd9447b0 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/desktopvirtualization/workspace.py | 84777b1001b9f8903ed1e5875ed1dbd416496651 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,931 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['WorkspaceArgs', 'Workspace']
@pulumi.input_type
class WorkspaceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
application_group_references: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetIdentityArgs']] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetPlanArgs']] = None,
sku: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetSkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Workspace resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Sequence[pulumi.Input[str]]] application_group_references: List of applicationGroup resource Ids.
:param pulumi.Input[str] description: Description of Workspace.
:param pulumi.Input[str] friendly_name: Friendly name of Workspace.
:param pulumi.Input[str] kind: Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] managed_by: The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if application_group_references is not None:
pulumi.set(__self__, "application_group_references", application_group_references)
if description is not None:
pulumi.set(__self__, "description", description)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if kind is not None:
pulumi.set(__self__, "kind", kind)
if location is not None:
pulumi.set(__self__, "location", location)
if managed_by is not None:
pulumi.set(__self__, "managed_by", managed_by)
if plan is not None:
pulumi.set(__self__, "plan", plan)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if workspace_name is not None:
pulumi.set(__self__, "workspace_name", workspace_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="applicationGroupReferences")
def application_group_references(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of applicationGroup resource Ids.
"""
return pulumi.get(self, "application_group_references")
@application_group_references.setter
def application_group_references(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "application_group_references", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of Workspace.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[pulumi.Input[str]]:
"""
Friendly name of Workspace.
"""
return pulumi.get(self, "friendly_name")
@friendly_name.setter
def friendly_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "friendly_name", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ResourceModelWithAllowedPropertySetIdentityArgs']]:
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def kind(self) -> Optional[pulumi.Input[str]]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
"""
return pulumi.get(self, "kind")
@kind.setter
def kind(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "kind", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> Optional[pulumi.Input[str]]:
"""
The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
"""
return pulumi.get(self, "managed_by")
@managed_by.setter
def managed_by(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "managed_by", value)
@property
@pulumi.getter
def plan(self) -> Optional[pulumi.Input['ResourceModelWithAllowedPropertySetPlanArgs']]:
return pulumi.get(self, "plan")
@plan.setter
def plan(self, value: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetPlanArgs']]):
pulumi.set(self, "plan", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['ResourceModelWithAllowedPropertySetSkuArgs']]:
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['ResourceModelWithAllowedPropertySetSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="workspaceName")
def workspace_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the workspace
"""
return pulumi.get(self, "workspace_name")
@workspace_name.setter
def workspace_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "workspace_name", value)
class Workspace(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_references: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetIdentityArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetPlanArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Represents a Workspace definition.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] application_group_references: List of applicationGroup resource Ids.
:param pulumi.Input[str] description: Description of Workspace.
:param pulumi.Input[str] friendly_name: Friendly name of Workspace.
:param pulumi.Input[str] kind: Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
:param pulumi.Input[str] location: The geo-location where the resource lives
:param pulumi.Input[str] managed_by: The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[str] workspace_name: The name of the workspace
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: WorkspaceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a Workspace definition.
API Version: 2021-02-01-preview.
:param str resource_name: The name of the resource.
:param WorkspaceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(WorkspaceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
application_group_references: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
description: Optional[pulumi.Input[str]] = None,
friendly_name: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetIdentityArgs']]] = None,
kind: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
managed_by: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetPlanArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ResourceModelWithAllowedPropertySetSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
workspace_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
__props__.__dict__["application_group_references"] = application_group_references
__props__.__dict__["description"] = description
__props__.__dict__["friendly_name"] = friendly_name
__props__.__dict__["identity"] = identity
__props__.__dict__["kind"] = kind
__props__.__dict__["location"] = location
__props__.__dict__["managed_by"] = managed_by
__props__.__dict__["plan"] = plan
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["workspace_name"] = workspace_name
__props__.__dict__["cloud_pc_resource"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["object_id"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:desktopvirtualization/v20190123preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20190924preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20191210preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20200921preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201019preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201102preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20201110preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210114preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210201preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210309preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210401preview:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210712:Workspace"), pulumi.Alias(type_="azure-native:desktopvirtualization/v20210903preview:Workspace")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Workspace, __self__).__init__(
'azure-native:desktopvirtualization:Workspace',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Workspace':
"""
Get an existing Workspace resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = WorkspaceArgs.__new__(WorkspaceArgs)
__props__.__dict__["application_group_references"] = None
__props__.__dict__["cloud_pc_resource"] = None
__props__.__dict__["description"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["friendly_name"] = None
__props__.__dict__["identity"] = None
__props__.__dict__["kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["managed_by"] = None
__props__.__dict__["name"] = None
__props__.__dict__["object_id"] = None
__props__.__dict__["plan"] = None
__props__.__dict__["sku"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
return Workspace(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="applicationGroupReferences")
def application_group_references(self) -> pulumi.Output[Optional[Sequence[str]]]:
"""
List of applicationGroup resource Ids.
"""
return pulumi.get(self, "application_group_references")
@property
@pulumi.getter(name="cloudPcResource")
def cloud_pc_resource(self) -> pulumi.Output[bool]:
"""
Is cloud pc resource.
"""
return pulumi.get(self, "cloud_pc_resource")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of Workspace.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
The etag field is *not* required. If it is provided in the response body, it must also be provided as a header per the normal etag convention. Entity tags are used for comparing two or more entities from the same requested resource. HTTP/1.1 uses entity tags in the etag (section 14.19), If-Match (section 14.24), If-None-Match (section 14.26), and If-Range (section 14.27) header fields.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> pulumi.Output[Optional[str]]:
"""
Friendly name of Workspace.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter
def identity(self) -> pulumi.Output[Optional['outputs.ResourceModelWithAllowedPropertySetResponseIdentity']]:
return pulumi.get(self, "identity")
@property
@pulumi.getter
def kind(self) -> pulumi.Output[Optional[str]]:
"""
Metadata used by portal/tooling/etc to render different UX experiences for resources of the same type; e.g. ApiApps are a kind of Microsoft.Web/sites type. If supported, the resource provider must validate and persist this value.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The geo-location where the resource lives
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="managedBy")
def managed_by(self) -> pulumi.Output[Optional[str]]:
"""
The fully qualified resource ID of the resource that manages this resource. Indicates if this resource is managed by another Azure resource. If this is present, complete mode deployment will not delete the resource if it is removed from the template since it is managed by another resource.
"""
return pulumi.get(self, "managed_by")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="objectId")
def object_id(self) -> pulumi.Output[str]:
"""
ObjectId of Workspace. (internal use)
"""
return pulumi.get(self, "object_id")
@property
@pulumi.getter
def plan(self) -> pulumi.Output[Optional['outputs.ResourceModelWithAllowedPropertySetResponsePlan']]:
return pulumi.get(self, "plan")
@property
@pulumi.getter
def sku(self) -> pulumi.Output[Optional['outputs.ResourceModelWithAllowedPropertySetResponseSku']]:
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
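# A minimal usage sketch (hypothetical resource and group names; the argument
# shapes follow the WorkspaceArgs definition above):
#
#     import pulumi_azure_native as azure_native
#
#     workspace = azure_native.desktopvirtualization.Workspace(
#         "workspace",
#         resource_group_name="resourceGroup1",
#         workspace_name="workspace1",
#         location="eastus",
#         friendly_name="Friendly workspace",
#         description="Example workspace",
#         tags={"env": "dev"})
#
# An existing workspace can then be looked up by its provider ID:
#
#     existing = azure_native.desktopvirtualization.Workspace.get(
#         "existing", id=workspace.id)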
| [
"[email protected]"
] |