column | dtype | observed range
---|---|---
blob_id | string | lengths 40-40
directory_id | string | lengths 40-40
path | string | lengths 3-616
content_id | string | lengths 40-40
detected_licenses | sequence | lengths 0-112
license_type | string | 2 classes
repo_name | string | lengths 5-115
snapshot_id | string | lengths 40-40
revision_id | string | lengths 40-40
branch_name | string | 777 classes
visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38
revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00
committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06
github_id | int64 | 4.92k to 681M, nullable
star_events_count | int64 | 0 to 209k
fork_events_count | int64 | 0 to 110k
gha_license_id | string | 22 classes
gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable
gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable
gha_language | string | 149 classes
src_encoding | string | 26 classes
language | string | 1 class
is_vendor | bool | 2 classes
is_generated | bool | 2 classes
length_bytes | int64 | 3 to 10.2M
extension | string | 188 classes
content | string | lengths 3 to 10.2M
authors | sequence | lengths 1-1
author_id | string | lengths 1-132
797aaff83b73703d3f4c8ec7b645f7e0e382d611 | 59166105545cdd87626d15bf42e60a9ee1ef2413 | /dbpedia/models/birth.py | 86c075f122a90e61e71a1be57b5d74e77274a9d4 | [] | no_license | mosoriob/dbpedia_api_client | 8c594fc115ce75235315e890d55fbf6bd555fa85 | 8d6f0d04a3a30a82ce0e9277e4c9ce00ecd0c0cc | refs/heads/master | 2022-11-20T01:42:33.481024 | 2020-05-12T23:22:54 | 2020-05-12T23:22:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,442 | py | # coding: utf-8
"""
DBpedia
This is the API of the DBpedia Ontology # noqa: E501
The version of the OpenAPI document: v0.0.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from dbpedia.configuration import Configuration
class Birth(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'number_of_people_attending': 'list[int]',
'end_date': 'list[str]',
'description': 'list[str]',
'caused_by': 'list[object]',
'label': 'list[str]',
'type': 'list[str]',
'participant': 'list[str]',
'duration': 'list[float]',
'previous_event': 'list[object]',
'next_event': 'list[object]',
'id': 'str',
'following_event': 'list[object]',
'start_date': 'list[str]'
}
attribute_map = {
'number_of_people_attending': 'numberOfPeopleAttending',
'end_date': 'endDate',
'description': 'description',
'caused_by': 'causedBy',
'label': 'label',
'type': 'type',
'participant': 'participant',
'duration': 'duration',
'previous_event': 'previousEvent',
'next_event': 'nextEvent',
'id': 'id',
'following_event': 'followingEvent',
'start_date': 'startDate'
}
def __init__(self, number_of_people_attending=None, end_date=None, description=None, caused_by=None, label=None, type=None, participant=None, duration=None, previous_event=None, next_event=None, id=None, following_event=None, start_date=None, local_vars_configuration=None): # noqa: E501
"""Birth - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._number_of_people_attending = None
self._end_date = None
self._description = None
self._caused_by = None
self._label = None
self._type = None
self._participant = None
self._duration = None
self._previous_event = None
self._next_event = None
self._id = None
self._following_event = None
self._start_date = None
self.discriminator = None
self.number_of_people_attending = number_of_people_attending
self.end_date = end_date
self.description = description
self.caused_by = caused_by
self.label = label
self.type = type
self.participant = participant
self.duration = duration
self.previous_event = previous_event
self.next_event = next_event
if id is not None:
self.id = id
self.following_event = following_event
self.start_date = start_date
@property
def number_of_people_attending(self):
"""Gets the number_of_people_attending of this Birth. # noqa: E501
Description not available # noqa: E501
:return: The number_of_people_attending of this Birth. # noqa: E501
:rtype: list[int]
"""
return self._number_of_people_attending
@number_of_people_attending.setter
def number_of_people_attending(self, number_of_people_attending):
"""Sets the number_of_people_attending of this Birth.
Description not available # noqa: E501
:param number_of_people_attending: The number_of_people_attending of this Birth. # noqa: E501
:type: list[int]
"""
self._number_of_people_attending = number_of_people_attending
@property
def end_date(self):
"""Gets the end_date of this Birth. # noqa: E501
The end date of the event. # noqa: E501
:return: The end_date of this Birth. # noqa: E501
:rtype: list[str]
"""
return self._end_date
@end_date.setter
def end_date(self, end_date):
"""Sets the end_date of this Birth.
The end date of the event. # noqa: E501
:param end_date: The end_date of this Birth. # noqa: E501
:type: list[str]
"""
self._end_date = end_date
@property
def description(self):
"""Gets the description of this Birth. # noqa: E501
small description # noqa: E501
:return: The description of this Birth. # noqa: E501
:rtype: list[str]
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Birth.
small description # noqa: E501
:param description: The description of this Birth. # noqa: E501
:type: list[str]
"""
self._description = description
@property
def caused_by(self):
"""Gets the caused_by of this Birth. # noqa: E501
Description not available # noqa: E501
:return: The caused_by of this Birth. # noqa: E501
:rtype: list[object]
"""
return self._caused_by
@caused_by.setter
def caused_by(self, caused_by):
"""Sets the caused_by of this Birth.
Description not available # noqa: E501
:param caused_by: The caused_by of this Birth. # noqa: E501
:type: list[object]
"""
self._caused_by = caused_by
@property
def label(self):
"""Gets the label of this Birth. # noqa: E501
short description of the resource # noqa: E501
:return: The label of this Birth. # noqa: E501
:rtype: list[str]
"""
return self._label
@label.setter
def label(self, label):
"""Sets the label of this Birth.
short description of the resource # noqa: E501
:param label: The label of this Birth. # noqa: E501
:type: list[str]
"""
self._label = label
@property
def type(self):
"""Gets the type of this Birth. # noqa: E501
type of the resource # noqa: E501
:return: The type of this Birth. # noqa: E501
:rtype: list[str]
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this Birth.
type of the resource # noqa: E501
:param type: The type of this Birth. # noqa: E501
:type: list[str]
"""
self._type = type
@property
def participant(self):
"""Gets the participant of this Birth. # noqa: E501
Description not available # noqa: E501
:return: The participant of this Birth. # noqa: E501
:rtype: list[str]
"""
return self._participant
@participant.setter
def participant(self, participant):
"""Sets the participant of this Birth.
Description not available # noqa: E501
:param participant: The participant of this Birth. # noqa: E501
:type: list[str]
"""
self._participant = participant
@property
def duration(self):
"""Gets the duration of this Birth. # noqa: E501
The duration of the item (movie, audio recording, event, etc.) in ISO 8601 date format # noqa: E501
:return: The duration of this Birth. # noqa: E501
:rtype: list[float]
"""
return self._duration
@duration.setter
def duration(self, duration):
"""Sets the duration of this Birth.
The duration of the item (movie, audio recording, event, etc.) in ISO 8601 date format # noqa: E501
:param duration: The duration of this Birth. # noqa: E501
:type: list[float]
"""
self._duration = duration
@property
def previous_event(self):
"""Gets the previous_event of this Birth. # noqa: E501
Description not available # noqa: E501
:return: The previous_event of this Birth. # noqa: E501
:rtype: list[object]
"""
return self._previous_event
@previous_event.setter
def previous_event(self, previous_event):
"""Sets the previous_event of this Birth.
Description not available # noqa: E501
:param previous_event: The previous_event of this Birth. # noqa: E501
:type: list[object]
"""
self._previous_event = previous_event
@property
def next_event(self):
"""Gets the next_event of this Birth. # noqa: E501
Description not available # noqa: E501
:return: The next_event of this Birth. # noqa: E501
:rtype: list[object]
"""
return self._next_event
@next_event.setter
def next_event(self, next_event):
"""Sets the next_event of this Birth.
Description not available # noqa: E501
:param next_event: The next_event of this Birth. # noqa: E501
:type: list[object]
"""
self._next_event = next_event
@property
def id(self):
"""Gets the id of this Birth. # noqa: E501
identifier # noqa: E501
:return: The id of this Birth. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Birth.
identifier # noqa: E501
:param id: The id of this Birth. # noqa: E501
:type: str
"""
self._id = id
@property
def following_event(self):
"""Gets the following_event of this Birth. # noqa: E501
Description not available # noqa: E501
:return: The following_event of this Birth. # noqa: E501
:rtype: list[object]
"""
return self._following_event
@following_event.setter
def following_event(self, following_event):
"""Sets the following_event of this Birth.
Description not available # noqa: E501
:param following_event: The following_event of this Birth. # noqa: E501
:type: list[object]
"""
self._following_event = following_event
@property
def start_date(self):
"""Gets the start_date of this Birth. # noqa: E501
The start date of the event. # noqa: E501
:return: The start_date of this Birth. # noqa: E501
:rtype: list[str]
"""
return self._start_date
@start_date.setter
def start_date(self, start_date):
"""Sets the start_date of this Birth.
The start date of the event. # noqa: E501
:param start_date: The start_date of this Birth. # noqa: E501
:type: list[str]
"""
self._start_date = start_date
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Birth):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Birth):
return True
return self.to_dict() != other.to_dict()
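# A minimal usage sketch (not part of the generated file; the field values
# below are illustrative, not real DBpedia data):
if __name__ == '__main__':
    birth = Birth(label=['example birth event'], start_date=['1815-12-10'])
    print(birth.to_dict())  # plain dict, ready for JSON serialization
    # Equality is defined via to_dict(), so an identically populated model compares equal:
    print(birth == Birth(label=['example birth event'], start_date=['1815-12-10']))  # True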
| [
"[email protected]"
] | |
c2e62e1fed4a5cfe95ae15e61a259807f50e7916 | f3a0f6fa4aea4a3f03594b7c2b863fc0ab9955ae | /plaid/model/sandbox_income_fire_webhook_request.py | a38b0d609e7b15992d81f5de2960349b027c7b18 | [
"MIT"
] | permissive | DougVaderJr/plaid-python | 78aac933703e5bc58e0a3e924dc9933c9954f825 | e9aee4f492c14c3eeda8158f84aafcd7e9c3ba0b | refs/heads/master | 2023-07-12T16:44:25.289248 | 2021-08-16T21:27:42 | 2021-08-16T21:27:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,285 | py | """
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
class SandboxIncomeFireWebhookRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('verification_status',): {
'PROCESSING_COMPLETE': "VERIFICATION_STATUS_PROCESSING_COMPLETE",
'DOCUMENT_REJECTED': "VERIFICATION_STATUS_DOCUMENT_REJECTED",
'PROCESSING_FAILED': "VERIFICATION_STATUS_PROCESSING_FAILED",
},
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
'income_verification_id': (str,), # noqa: E501
'webhook': (str,), # noqa: E501
'verification_status': (str,), # noqa: E501
'client_id': (str,), # noqa: E501
'secret': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'income_verification_id': 'income_verification_id', # noqa: E501
'webhook': 'webhook', # noqa: E501
'verification_status': 'verification_status', # noqa: E501
'client_id': 'client_id', # noqa: E501
'secret': 'secret', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, income_verification_id, webhook, verification_status, *args, **kwargs): # noqa: E501
"""SandboxIncomeFireWebhookRequest - a model defined in OpenAPI
Args:
income_verification_id (str): The ID of the verification.
webhook (str): The URL to which the webhook should be sent.
verification_status (str): `VERIFICATION_STATUS_PROCESSING_COMPLETE`: The income verification status processing has completed. `VERIFICATION_STATUS_DOCUMENT_REJECTED`: The documentation uploaded by the end user was recognized as a supported file format, but not recognized as a valid paystub. `VERIFICATION_STATUS_PROCESSING_FAILED`: A failure occurred when attempting to process the verification documentation.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
client_id (str): Your Plaid API `client_id`. The `client_id` is required and may be provided either in the `PLAID-CLIENT-ID` header or as part of a request body.. [optional] # noqa: E501
secret (str): Your Plaid API `secret`. The `secret` is required and may be provided either in the `PLAID-SECRET` header or as part of a request body.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.income_verification_id = income_verification_id
self.webhook = webhook
self.verification_status = verification_status
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
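# A minimal construction sketch (not part of the generated file; the ID and
# URL are placeholders):
#
#     req = SandboxIncomeFireWebhookRequest(
#         income_verification_id='sandbox-income-123',
#         webhook='https://example.com/plaid/webhook',
#         verification_status='VERIFICATION_STATUS_PROCESSING_COMPLETE',
#     )
#
# The three positional arguments are required; per the docstring above,
# client_id and secret may be omitted here when supplied via request headers.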
| [
"[email protected]"
] | |
7dc6169ff09acd315578b8f111952fafd2380428 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/image_classification/Swin-Transformer_ID2377_for_PyTorch/logger.py | ffb47f2fd264aaa1749ce089aabd768a7b50cbad | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 2,695 | py | #
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
import logging
import sys
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
def create_logger(output_dir, dist_rank=0, name=''):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.propagate = False
fmt = '[%(asctime)s %(name)s] (%(filename)s %(lineno)d): %(levelname)s %(message)s'
#if dist_rank == 0:
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(console_handler)
file_handler = logging.FileHandler(os.path.join(output_dir, f'log_rank{dist_rank}.txt'), mode='a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(logging.Formatter(fmt=fmt, datefmt='%Y-%m-%d %H:%M:%S'))
logger.addHandler(file_handler)
return logger | [
"[email protected]"
] | |
1890a6e4eb9ea623fd1b3048b6bdd2df43af873f | a7058080e41af37eb77c146fc09a5e4db57f7ec6 | /Solved/10817/10817.py | b367bcb33c195c11704251209f5f52883256727a | [] | no_license | Jinmin-Goh/BOJ_PS | bec0922c01fbf6e440589cc684d0cd736e775066 | 09a285bd1369bd0d73f86386b343d271dc08a67d | refs/heads/master | 2022-09-24T02:24:50.823834 | 2022-09-21T02:16:22 | 2022-09-21T02:16:22 | 223,768,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 202 | py | # Problem No.: 10817
# Solver: Jinmin Goh
# Date: 191120
# URL: https://www.acmicpc.net/problem/10817
a = input()
a = a.split()
for i in range(3):
a[i] = int(a[i])
a.sort()
print(a[1]) | [
"[email protected]"
] | |
9cdaa82fda89ad9951a6b8d5c94f1ebd93c88177 | 2f963d7989749037a3ec27aaa39b31416b33cbb2 | /ib_action/views/user_action_counts/tests/test_case_01.py | afa4eddc49ff53e95a58568a880e84464e0d4139 | [] | no_license | migsantos121/phd3-backend | 3cd014908856c995de3c4473d82059bc9c1b5794 | 9d1d2bd6f55dc89719ce5a1916c5db3d573aec1e | refs/heads/master | 2022-12-12T17:25:59.334509 | 2020-03-09T09:24:08 | 2020-03-09T09:24:08 | 245,991,086 | 0 | 0 | null | 2022-06-28T14:45:50 | 2020-03-09T09:17:18 | Python | UTF-8 | Python | false | false | 1,559 | py | from . import APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX
from django_swagger_utils.drf_server.utils.server_gen.custom_api_test_case import CustomAPITestCase
request_body = """
{
"action_types": [
"RATE",
"RATE"
],
"source": "string",
"action_values": [
"string",
"string"
],
"entity_types": [
"string",
"string"
]
}
"""
response_body = """
[{"action_value": "string", "entity_count": 1, "action_type": "RATE", "entity_type": "string"}, {"action_value": "string", "entity_count": 1, "action_type": "RATE", "entity_type": "string"}]
"""
test_case = {
"request": {
"path_params": {},
"query_params": {},
"header_params": {},
"securities": {"oauth": {"scopes": ["read", "write"], "tokenUrl": "http://auth.ibtspl.com/oauth2/", "flow": "password", "type": "oauth2"}},
"body": request_body,
},
"response": {
"status": 200,
"body": response_body,
"header_params": {}
}
}
class TestCase01UserActionCountsAPITestCase(CustomAPITestCase):
def __init__(self, *args, **kwargs):
super(TestCase01UserActionCountsAPITestCase, self).__init__(APP_NAME, OPERATION_NAME, REQUEST_METHOD, URL_SUFFIX, test_case,
*args, **kwargs)
def test_case(self):
response = super(TestCase01UserActionCountsAPITestCase, self).test_case()
# your extended implementation of test case
self.assertEqual(response.status_code, 200)
| [
"[email protected]"
] | |
69975169bb1b1827b925e00ea4eae20831cca216 | 7950c4faf15ec1dc217391d839ddc21efd174ede | /problems/0342.0_Power_of_Four.py | a0a5a32e27c28c00543bc71d410f6523723b5a68 | [] | no_license | lixiang2017/leetcode | f462ecd269c7157aa4f5854f8c1da97ca5375e39 | f93380721b8383817fe2b0d728deca1321c9ef45 | refs/heads/master | 2023-08-25T02:56:58.918792 | 2023-08-22T16:43:36 | 2023-08-22T16:43:36 | 153,090,613 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | '''
Success
Details
Runtime: 16 ms, faster than 83.07% of Python online submissions for Power of Four.
Memory Usage: 11.9 MB, less than 11.11% of Python online submissions for Power of Four.
'''
class Solution(object):
def isPowerOfFour(self, num):
"""
:type num: int
:rtype: bool
"""
if num <= 0:
return False
while num > 1:
remainder = num % 4
if remainder != 0:
return False
else:
num = num / 4
return True
if __name__ == "__main__":
num = 16
assert Solution().isPowerOfFour(num)
num = 5
assert not Solution().isPowerOfFour(num)
num = 0
assert not Solution().isPowerOfFour(num)
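    # An equivalent constant-time check (a sketch, not the submitted solution):
    # a positive power of four is a power of two (exactly one set bit) whose
    # remainder mod 3 is 1, because 4**k % 3 == 1 while 2 * 4**k % 3 == 2.
    def is_power_of_four_bits(n):
        return n > 0 and n & (n - 1) == 0 and n % 3 == 1
    assert is_power_of_four_bits(16)
    assert not is_power_of_four_bits(8)
    assert not is_power_of_four_bits(0)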
| [
"[email protected]"
] | |
005ac079befb88d6eec83627239178a0d305530e | fd513d0ef02231526fc8225dfdb1bf84752b3541 | /jaraco/packaging/depends.py | ebeddfe0fef449ba636d347cc3a54377cd87d3f4 | [
"MIT"
] | permissive | artynet/jaraco.packaging | 810bfb83238a9075de6001deec9eaaca56db64a9 | d68128b9c8c12334364128aec05b038102515159 | refs/heads/master | 2022-11-15T10:50:18.224592 | 2020-02-16T15:56:37 | 2020-02-16T15:56:37 | 277,494,378 | 0 | 0 | null | 2020-07-06T09:08:05 | 2020-07-06T09:08:04 | null | UTF-8 | Python | false | false | 3,245 | py | """
This module should only import modules from stdlib and setuptools
"""
from __future__ import print_function, unicode_literals
import os
import re
import argparse
import subprocess
import setuptools
import pkg_resources
text_type = getattr(__builtins__, 'unicode', str)
req_help = "A setuptools requirement spec (e.g. 'eggmonster' or " "'eggmonster==0.1')"
python_help = "Use a remote environment rather than the local one."
def tree_cmd():
parser = argparse.ArgumentParser()
parser.add_argument('requirement', help=req_help)
parser.add_argument('--python', help=python_help)
args = parser.parse_args()
if args.python:
return check_dependencies_remote(args)
check_dependencies(args.requirement)
def print_package(requirement, indent):
r = requirement
print(' ' * indent + str(r), '[{0}]'.format(pkg_resources.get_distribution(r)))
def parse_extras(req):
pattern = re.compile(r'\[(.*)\]')
res = pattern.search(text_type(req))
return res.group(1).split(',') if res else []
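# For example: parse_extras("gryphon[pdf,test]") returns ['pdf', 'test'];
# a requirement spec without extras returns [].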
def check_dependencies(req, indent=1, history=None):
"""
Given a setuptools package requirement (e.g. 'gryphon==2.42' or just
'gryphon'), print a tree of dependencies as they resolve in this
environment.
"""
# keep a history to avoid infinite loops
if history is None:
history = set()
if req in history:
return
history.add(req)
d = pkg_resources.get_distribution(req)
extras = parse_extras(req)
if indent == 1:
print_package(req, 0)
for r in d.requires(extras=extras):
print_package(r, indent)
check_dependencies(r, indent + 1, history)
def load_dependencies(req, history=None):
"""
Load the dependency tree as a Python object tree,
suitable for JSON serialization.
>>> deps = load_dependencies('jaraco.packaging')
>>> import json
>>> doc = json.dumps(deps)
"""
if history is None:
history = set()
dist = pkg_resources.get_distribution(req)
spec = dict(requirement=str(req), resolved=str(dist))
if req not in history:
# traverse into children
history.add(req)
extras = parse_extras(req)
depends = [
load_dependencies(dep, history=history)
for dep in dist.requires(extras=extras)
]
if depends:
spec.update(depends=depends)
return spec
class DependencyTree(setuptools.Command):
description = "Report a tree of resolved dependencies"
user_options = [
(str('requirement='), str('r'), req_help),
(str('python='), str('p'), python_help),
]
def finalize_options(self):
pass
def initialize_options(self):
self.requirement = self.distribution.get_name()
self.python = None
def run(self):
if self.python:
return check_dependencies_remote(self)
check_dependencies(self.requirement)
def check_dependencies_remote(args):
"""
Invoke this command on a remote Python.
"""
cmd = [args.python, '-m', 'depends', args.requirement]
env = dict(PYTHONPATH=os.path.dirname(__file__))
return subprocess.check_call(cmd, env=env)
if __name__ == '__main__':
tree_cmd()
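# Invocation sketch (requirement names here are illustrative):
#   python -m jaraco.packaging.depends pytest                      # local environment
#   python -m jaraco.packaging.depends pytest --python /path/to/python
# The --python form re-runs this module under the given interpreter via
# `-m depends`, with PYTHONPATH pointed at this package directory.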
| [
"[email protected]"
] | |
1a44630bcc49abc465f08b79db51861073294d16 | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /284/284.peeking-iterator.233519966.Accepted.leetcode.py | 363695e26d3e5481086fc95101ac8197fd2fcef5 | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | class PeekingIterator(object):
def __init__(self, iterator):
self.front = None
self.it = iterator
if self.it.hasNext():
self.front = self.it.next()
def peek(self):
return self.front
def next(self):
temp = self.front
self.front = None
if self.it.hasNext():
self.front = self.it.next()
return temp
def hasNext(self):
        # Compare against the None sentinel: bool(self.front) would wrongly
        # report exhaustion when the next value is falsy (e.g. 0).
        return self.front is not None
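# A usage sketch. LeetCode normally supplies an Iterator with hasNext()/next();
# the stand-in below is hypothetical:
if __name__ == '__main__':
    class _ListIterator(object):
        def __init__(self, data):
            self.data = list(data)
            self.i = 0
        def hasNext(self):
            return self.i < len(self.data)
        def next(self):
            v = self.data[self.i]
            self.i += 1
            return v
    it = PeekingIterator(_ListIterator([1, 2, 3]))
    assert it.peek() == 1   # peek does not advance
    assert it.next() == 1
    assert it.next() == 2
    assert it.hasNext() and it.next() == 3
    assert not it.hasNext()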
| [
"[email protected]"
] | |
13ad90a72e1503b0419dd453068d2ddfbf4c2ed3 | 71e5ce7abbf5f9e8887c16e7a89219a98a5827cf | /Python-for-everyone/01_For_Everyone/05_Loop_Iterations/06_count.py | 1882c6ed7cd3efc4bb7006b59f6552575a63ad71 | [] | no_license | Python-Repository-Hub/Learn-Online-Learning | f62c9965b34c050abcc4b2ef9a4d09600a0e52ec | 2845a0d66b1f7fa416182b99cef25cda871b3360 | refs/heads/master | 2023-07-27T17:00:44.498987 | 2021-09-14T06:10:37 | 2021-09-14T06:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 307 | py | zork = 0
print('Before', zork)
numbers = [9, 41, 12, 3, 74, 15]  # unlike the lecture, numbers is declared here as a list of ints
for thing in numbers :
zork = zork + 1
print(zork, thing)
print('After', zork)
# Before 0
# 1 9
# 2 41
# 3 12
# 4 3
# 5 74
# 6 15
# After 6
| [
"[email protected]"
] | |
d7e643eac7eefa815456f66073d2a633d5e65bc2 | 8fc2707bc30c8e56a607e0fd97122d3509ce6dbd | /Pithon/pithon-game/core/Player.py | 89d02f6bf8ba37de6cc6a14df1ee70ebc16438f7 | [] | no_license | pithon/pithon | 8c0239f527866ce05b327b436350dcc0e7fab4cb | 9a183be17464a810c0c047fbc29b52451d39f641 | refs/heads/master | 2021-01-18T14:10:18.606353 | 2013-02-23T04:41:38 | 2013-02-23T04:41:38 | 7,985,688 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | import pygame, constants
from Tail import Tail
class Player(object):
def __init__(self, color):
self.tail=[]
head=Tail(pygame.color.Color(255,255,255), self, 116,100, constants.RIGHT)
self.tail.append(head)
self.color=color
self.direction=constants.RIGHT
self.extend=True
def add_tail(self):
self.extend=True
def update_tail(self, direction):
save_direction=direction
for i in self.tail:
save_direction=i.direction
i.direction=direction
direction=save_direction
i.update()
if self.extend:
last_part = self.tail[-1]
new_part = Tail(self.color, self, last_part.rect.x, last_part.rect.y, last_part.direction)
self.tail.append(new_part)
self.extend=False
def render_tail(self, screen):
for i in self.tail:
screen.blit(i.image, i.rect) | [
"[email protected]"
] | |
8229bbf974f9c0d3d5cfb6455f6d0e95c0bc8258 | d6617514df849a77b491159c4cc8a3bfc6599d83 | /hw2/DecryptDES.py | 7f2088a9af6d0c0cc5ae44fca4c41c088699bebb | [] | no_license | LJP-TW/Information_Security_Class | 409cc852480c611d89b60139705bc9de4374670d | d195b99c62e653ec551ce50077c6d29b23668b8c | refs/heads/master | 2020-08-01T19:26:54.886014 | 2019-12-22T14:10:04 | 2019-12-22T14:10:04 | 211,090,532 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,145 | py | #!/usr/bin/python3
import sys
import numpy as np
key = bin(int(sys.argv[1], 16))[2:]
text = bin(int(sys.argv[2], 16))[2:]
key = '0' * (64 - len(key)) + key
text = '0' * (64 - len(text)) + text
##### table
IP = [int(x)-1 for x in ('58 50 42 34 26 18 10 2 60 52 44 36 28 20 12 4 62 54 46 38 30 22 14 6 64 56 48 40 32 24 16 8 57 49 41 33 25 17 9 1 59 51 43 35 27 19 11 3 61 53 45 37 29 21 13 5 63 55 47 39 31 23 15 7'.split())]
IP_1 = [int(x)-1 for x in ('40 8 48 16 56 24 64 32 39 7 47 15 55 23 63 31 38 6 46 14 54 22 62 30 37 5 45 13 53 21 61 29 36 4 44 12 52 20 60 28 35 3 43 11 51 19 59 27 34 2 42 10 50 18 58 26 33 1 41 9 49 17 57 25'.split())]
E = [int(x)-1 for x in ('32 1 2 3 4 5 4 5 6 7 8 9 8 9 10 11 12 13 12 13 14 15 16 17 16 17 18 19 20 21 20 21 22 23 24 25 24 25 26 27 28 29 28 29 30 31 32 1'.split())]
PC_1 = [int(x)-1 for x in ('57 49 41 33 25 17 9 1 58 50 42 34 26 18 10 2 59 51 43 35 27 19 11 3 60 52 44 36 63 55 47 39 31 23 15 7 62 54 46 38 30 22 14 6 61 53 45 37 29 21 13 5 28 20 12 4'.split())]
PC_2 = [int(x)-1 for x in ('14 17 11 24 1 5 3 28 15 6 21 10 23 19 12 4 26 8 16 7 27 20 13 2 41 52 31 37 47 55 30 40 51 45 33 48 44 49 39 56 34 53 46 42 50 36 29 32'.split())]
P = [int(x)-1 for x in ('16 7 20 21 29 12 28 17 1 15 23 26 5 18 31 10 2 8 24 14 32 27 3 9 19 13 30 6 22 11 4 25'.split())]
Sbox = []
Sbox.append(np.array([int(x) for x in ('14 4 13 1 2 15 11 8 3 10 6 12 5 9 0 7 0 15 7 4 14 2 13 1 10 6 12 11 9 5 3 8 4 1 14 8 13 6 2 11 15 12 9 7 3 10 5 0 15 12 8 2 4 9 1 7 5 11 3 14 10 0 6 13'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('15 1 8 14 6 11 3 4 9 7 2 13 12 0 5 10 3 13 4 7 15 2 8 14 12 0 1 10 6 9 11 5 0 14 7 11 10 4 13 1 5 8 12 6 9 3 2 15 13 8 10 1 3 15 4 2 11 6 7 12 0 5 14 9'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('10 0 9 14 6 3 15 5 1 13 12 7 11 4 2 8 13 7 0 9 3 4 6 10 2 8 5 14 12 11 15 1 13 6 4 9 8 15 3 0 11 1 2 12 5 10 14 7 1 10 13 0 6 9 8 7 4 15 14 3 11 5 2 12'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('7 13 14 3 0 6 9 10 1 2 8 5 11 12 4 15 13 8 11 5 6 15 0 3 4 7 2 12 1 10 14 9 10 6 9 0 12 11 7 13 15 1 3 14 5 2 8 4 3 15 0 6 10 1 13 8 9 4 5 11 12 7 2 14'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('2 12 4 1 7 10 11 6 8 5 3 15 13 0 14 9 14 11 2 12 4 7 13 1 5 0 15 10 3 9 8 6 4 2 1 11 10 13 7 8 15 9 12 5 6 3 0 14 11 8 12 7 1 14 2 13 6 15 0 9 10 4 5 3'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('12 1 10 15 9 2 6 8 0 13 3 4 14 7 5 11 10 15 4 2 7 12 9 5 6 1 13 14 0 11 3 8 9 14 15 5 2 8 12 3 7 0 4 10 1 13 11 6 4 3 2 12 9 5 15 10 11 14 1 7 6 0 8 13'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('4 11 2 14 15 0 8 13 3 12 9 7 5 10 6 1 13 0 11 7 4 9 1 10 14 3 5 12 2 15 8 6 1 4 11 13 12 3 7 14 10 15 6 8 0 5 9 2 6 11 13 8 1 4 10 7 9 5 0 15 14 2 3 12'.split())]).reshape(4,16))
Sbox.append(np.array([int(x) for x in ('13 2 8 4 6 15 11 1 10 9 3 14 5 0 12 7 1 15 13 8 10 3 7 4 12 5 6 11 0 14 9 2 7 11 4 1 9 12 14 2 0 6 10 13 15 3 5 8 2 1 14 7 4 10 8 13 15 12 9 0 3 5 6 11'.split())]).reshape(4,16))
LS = [1, 2, 4, 6, 8, 10, 12, 14, 15, 17, 19, 21, 23, 25, 27, 0][::-1]
#### end
def convert(data, table):
newData = ''
for i in range(len(table)):
newData += data[table[i]]
return newData
def xor(s1, s2):
s = ''
for i in range(len(s1)):
if(s1[i] == s2[i]):
s += '0'
else:
s += '1'
return s
def SubKey(K, i):
C = K[0:28]
D = K[28:]
C = C[LS[i]:] + C[:LS[i]]
D = D[LS[i]:] + D[:LS[i]]
return convert(C + D, PC_2)
def foo(R, K):
B = xor(convert(R, E), K)
z = ''
for i in range(8):
b = B[i*6: i*6 + 6]
y = int((b[0]+b[5]), 2)
x = int(b[1:5], 2)
out = bin(Sbox[i][y][x])[2:]
out = '0' * (4 - len(out)) + out
z += out
return convert(z, P)
key = convert(key, PC_1)
text = convert(text, IP)
L = text[32:]
R = text[0:32]
LN = ''
RN = ''
for i in range(16):
LN = xor(R, foo(L, SubKey(key, i)))
RN = L
L = LN
R = RN
ans = hex(int(convert(L + R, IP_1), 2))[2:].lower()
ans = '0x' + ans
print(ans)
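# Usage sketch: both arguments are 16 hex digits (64 bits), e.g.
#   python3 DecryptDES.py 133457799BBCDFF1 85E813540F0AB405
# which, for the classic DES walkthrough test vector, should print the
# recovered plaintext 0x123456789abcdef.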
| [
"[email protected]"
] | |
d493b4b047bfc276bb5effed4193ae083b59c65b | e44c83395d2ddd1e1b7c1e521d360f2ef8d585d0 | /gitlab-new/tt/bus8684/items.py | 4c8f025a4aa785fb80e448fde15776b7fd3226b0 | [] | no_license | zhoujx4/Crawls | 63ebcac5b4c0bbccdde56e6a2f5efbc4091d03e0 | 94b3ac88d7e49cb4a03e7b211a9437709d1c371c | refs/heads/master | 2020-12-23T15:25:48.041965 | 2020-01-30T10:35:19 | 2020-01-30T10:35:19 | 237,189,197 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 432 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item, Field
class Bus8684Item(scrapy.Item):
# define the fields for your item here like:
content = Field()
page_type = Field()
# Housekeeping fields
url = Field()
project = Field()
spider = Field()
server = Field()
date = Field()
| [
"[email protected]"
] | |
499d2b9e316af87f449192a3c3de07b8cf55b4f0 | 03de685efae7d8f6de0e98c3008cb89f87825fb4 | /test/compare/compare.py | c11c4504dcda17b5e254ee24b4672cc0d22cea78 | [] | no_license | gedeschaines/robotics-toolbox-python | 161f7af8be91c51e1902021ba9f9dc3f6fc5b766 | 22eb2394172e60b1dbca03d4be9bb0ecaf49b183 | refs/heads/master | 2021-06-14T00:42:24.468518 | 2021-02-17T22:20:36 | 2021-02-17T22:20:36 | 140,235,483 | 13 | 3 | null | 2019-01-06T13:30:23 | 2018-07-09T05:29:19 | Python | UTF-8 | Python | false | false | 1,522 | py | """
Compare the RNE implementations of Matlab and Python.
We take the Matlab results as the gold standard since they were cross-checked
against Maple and other implementations a long time ago.
The process:
1. genpath.m creates random q, qd and qdd data and saves it to path.dat. Random
   values across the q, qd and qdd state space are used, with significant
   velocity and acceleration, so that errors in the velocity- and
   acceleration-specific parts of the RNE algorithms show up. There are 60 rows:
   rows 1-20:  qd=qdd=0, gravity and friction torques only
   rows 21-40: qdd=0, gravity, friction and centripetal/Coriolis forces
   rows 41-60: all forces.
2. genpath.m creates tau for the Puma560 (DH) and saves it to puma560.dat
3. genpath.m creates tau for the Puma560 (MDH) and saves it to puma560m.dat
4. compare.py loads path.dat, computes the torques for the DH and MDH cases and
   finds the difference from the Matlab versions.
"""
from robot import *;
print "Compare Python and Matlab RNE implementations"
# load the (q,qd,qdd) data
path = loadtxt('path.dat');
# load the Matlab computed torques
matlab_dh = loadtxt('puma560.dat');
from robot.puma560 import *
tau = rne(p560, path);
diff = matlab_dh - tau;
#print diff
print "RNE DH, error norm =", linalg.norm(diff, 'fro')
#############
# load the Matlab computed torques
matlab_mdh = loadtxt('puma560m.dat');
from robot.puma560akb import *
tau = rne(p560m, path);
diff = matlab_mdh - tau;
#print diff
print "RNE MDH, error norm =", linalg.norm(diff, 'fro')
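#############
# A per-regime breakdown (a sketch, using the row layout described in the
# module docstring):
for name, rows in [('gravity/friction', slice(0, 20)),
                   ('+ velocity terms', slice(20, 40)),
                   ('all forces', slice(40, 60))]:
    print "RNE MDH,", name, "error norm =", linalg.norm(diff[rows], 'fro')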
| [
"[email protected]"
] | |
a330d906dd0cdf05fc80449ad1547096442e3b8f | c5be188cf1231d62f7ad69c98ee71b4bc181f6f2 | /image_resize.py | b94f3f35d8941b74d58a8b394552b06f0b5a1b6d | [] | no_license | ashish1sasmal/OPENCV-Python | e2d81e4472413ba79c5cdd4fcaad534225a5d394 | 589accc3c16eb4bf515ba85ee46ae6fdc2347bc6 | refs/heads/master | 2021-07-17T17:56:30.438576 | 2020-10-16T17:12:09 | 2020-10-16T17:12:09 | 217,500,976 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | import cv2
l=[]
def click_event(event,x,y,flags,param):
global l
# print(l)
if event == cv2.EVENT_LBUTTONDOWN:
l.append([x,y])
if len(l) == 3:
print(l,l[2][0]+l[1][0]-l[0][0],l[2][1]+l[1][1]-l[0][1])
            # Paste the rectangle spanning corners l[0]..l[1] at top-left l[2];
            # the source column slice must run l[0][0]:l[1][0] so the shapes match.
            img[l[2][1]:l[2][1]+l[1][1]-l[0][1], l[2][0]:l[2][0]+l[1][0]-l[0][0]] = img[l[0][1]:l[1][1], l[0][0]:l[1][0]]
cv2.imshow('Frame',img)
l=[]
img = cv2.imread("app.jpg")
cv2.imshow('Frame',img)
cv2.setMouseCallback('Frame', click_event)
cv2.waitKey(0)
cv2.destroyAllWindows()
| [
"[email protected]"
] | |
fa69246e9b983af79f51aa0df615d294d267d396 | 0ba2c3776618b5b8b76f4a23f21e9c6ad3f6e2e1 | /part2/04.py | 278a036d3564d1aaba49d2e49151f5a96d565b99 | [] | no_license | WangDongDong1234/python_code | 6dc5ce8210b1dcad7d57320c9e1946fd4b3fe302 | 6a785306a92d328a0d1427446ca773a9803d4cc0 | refs/heads/master | 2020-04-15T12:35:03.427589 | 2019-09-16T15:38:25 | 2019-09-16T15:38:25 | 164,681,323 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | class GameRole:
def __init__(self,name,ad,hp):
self.name=name
self.ad=ad
self.hp=hp
def attack(self,p):
p.hp=p.hp-self.ad
        print("%s attacked %s; %s lost %s HP, %s HP left" % (self.name, p.name, p.name, self.ad, p.hp))
def equip_weapon(self,wea):
self.wea=wea
self.ad=self.ad+wea.ad
p1=GameRole("亚瑟",20,500)
p2=GameRole("剑豪",50,300)
#p1.attack(p2)
class Weapon:
def __init__(self,name,ad):
self.name=name
self.ad=ad
def fight(self,p1,p2):
p2.hp=p2.hp-self.ad
        print("%s hit %s with %s; %s lost %s HP, %s HP left" % (p1.name, self.name, p2.name, p2.name, self.ad, p2.hp))
axe=Weapon("三板斧",2)
p1.equip_weapon(axe)
p1.attack(p2)
print(p1.wea.name)
print(p1.wea.ad) | [
"[email protected]"
] | |
81da510db2f778bb19fac6ba41f0d921b840fc1a | 0a6cd8461a8964daf237ebc50811cfd87555980e | /codingbat/string-2/1.py | 25bc1074d3eb34c9d5e4724d4ee2766d942ac815 | [] | no_license | Galymbekov/WebDev-Python | e3ef9ed5240f5df60226e0dc821e3a035bdb9017 | 0a3a3286deef392efb177f3efd37dac1eab101d3 | refs/heads/main | 2023-03-28T13:40:17.851821 | 2021-04-03T07:13:59 | 2021-04-03T07:13:59 | 353,316,046 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 107 | py | def chars(a):
res = ''
for i in a:
res = res + i * 2
return res
print(chars('Dastan')) | [
"[email protected]"
] | |
1bdffd60464eb5d1d0a4aa03bd1f51e6778e9f08 | 47eccd2a6b844bce32012017e2ad3eb62221763c | /producers/models/line.py | dc48f215d8734108a64ee764d036d040425954c1 | [] | no_license | zxy-zxy/udacity_data_streaming_traffic_optimization | 953b95536e62a18e9bdd9fd8244fbef2966789fb | 3bb8d0e6f5e4b78b0c0a9e0b3e85a3f8788f3c65 | refs/heads/master | 2023-03-07T07:42:20.397271 | 2021-02-20T07:57:41 | 2021-02-20T07:57:41 | 340,464,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,097 | py | """Defines functionality relating to train lines"""
from enum import IntEnum
import logging
from producers.models.station import Station
from producers.models.train import Train
logger = logging.getLogger(__name__)
class Line:
"""Contains Chicago Transit Authority (CTA) Elevated Loop Train ("L") Station Data"""
colors = IntEnum("colors", "blue green red", start=0)
num_directions = 2
def __init__(self, color, station_data, num_trains=10):
self.color = color
self.num_trains = num_trains
self.stations = self._build_line_data(station_data)
# We must always discount the terminal station at the end of each direction
self.num_stations = len(self.stations) - 1
self.trains = self._build_trains()
def _build_line_data(self, station_df):
"""Constructs all stations on the line"""
stations = station_df["station_name"].unique()
station_data = station_df[station_df["station_name"] == stations[0]]
line = [Station(station_data["station_id"].unique()[0], stations[0], self.color)]
prev_station = line[0]
for station in stations[1:]:
station_data = station_df[station_df["station_name"] == station]
new_station = Station(
station_data["station_id"].unique()[0],
station,
self.color,
prev_station,
)
prev_station.dir_b = new_station
prev_station = new_station
line.append(new_station)
return line
def _build_trains(self):
"""Constructs and assigns train objects to stations"""
trains = []
curr_loc = 0
b_dir = True
for train_id in range(self.num_trains):
tid = str(train_id).zfill(3)
train = Train(f"{self.color.name[0].upper()}L{tid}", Train.status.in_service)
trains.append(train)
if b_dir:
self.stations[curr_loc].arrive_b(train, None, None)
else:
self.stations[curr_loc].arrive_a(train, None, None)
curr_loc, b_dir = self._get_next_idx(curr_loc, b_dir)
return trains
def run(self, timestamp, time_step):
"""Advances trains between stations in the simulation. Runs turnstiles."""
self._advance_turnstiles(timestamp, time_step)
self._advance_trains()
def close(self):
"""Called to stop the simulation"""
_ = [station.close() for station in self.stations]
def _advance_turnstiles(self, timestamp, time_step):
"""Advances the turnstiles in the simulation"""
_ = [station.turnstile.run(timestamp, time_step) for station in self.stations]
def _advance_trains(self):
"""Advances trains between stations in the simulation"""
# Find the first b train
curr_train, curr_index, b_direction = self._next_train()
self.stations[curr_index].b_train = None
trains_advanced = 0
while trains_advanced < self.num_trains - 1:
# The train departs the current station
if b_direction is True:
self.stations[curr_index].b_train = None
else:
self.stations[curr_index].a_train = None
prev_station = self.stations[curr_index].station_id
prev_dir = "b" if b_direction else "a"
# Advance this train to the next station
curr_index, b_direction = self._get_next_idx(curr_index, b_direction, step_size=1)
if b_direction is True:
self.stations[curr_index].arrive_b(curr_train, prev_station, prev_dir)
else:
self.stations[curr_index].arrive_a(curr_train, prev_station, prev_dir)
# Find the next train to advance
move = 1 if b_direction else -1
next_train, curr_index, b_direction = self._next_train(curr_index + move, b_direction)
            curr_train = next_train
trains_advanced += 1
# The last train departs the current station
if b_direction is True:
self.stations[curr_index].b_train = None
else:
self.stations[curr_index].a_train = None
# Advance last train to the next station
prev_station = self.stations[curr_index].station_id
prev_dir = "b" if b_direction else "a"
curr_index, b_direction = self._get_next_idx(curr_index, b_direction, step_size=1)
if b_direction is True:
self.stations[curr_index].arrive_b(curr_train, prev_station, prev_dir)
else:
self.stations[curr_index].arrive_a(curr_train, prev_station, prev_dir)
def _next_train(self, start_index=0, b_direction=True, step_size=1):
"""Given a starting index, finds the next train in either direction"""
if b_direction is True:
curr_index = self._next_train_b(start_index, step_size)
if curr_index == -1:
curr_index = self._next_train_a(len(self.stations) - 1, step_size)
b_direction = False
else:
curr_index = self._next_train_a(start_index, step_size)
if curr_index == -1:
curr_index = self._next_train_b(0, step_size)
b_direction = True
if b_direction is True:
return self.stations[curr_index].b_train, curr_index, True
return self.stations[curr_index].a_train, curr_index, False
def _next_train_b(self, start_index, step_size):
"""Finds the next train in the b direction, if any"""
for i in range(start_index, len(self.stations), step_size):
if self.stations[i].b_train is not None:
return i
return -1
def _next_train_a(self, start_index, step_size):
"""Finds the next train in the a direction, if any"""
for i in range(start_index, 0, -step_size):
if self.stations[i].a_train is not None:
return i
return -1
def _get_next_idx(self, curr_index, b_direction, step_size=None):
"""Calculates the next station index. Returns next index and if it is b direction"""
if step_size is None:
step_size = int((self.num_stations * Line.num_directions) / self.num_trains)
if b_direction is True:
next_index = curr_index + step_size
if next_index < self.num_stations:
return next_index, True
else:
return self.num_stations - (next_index % self.num_stations), False
else:
next_index = curr_index - step_size
if next_index > 0:
return next_index, False
else:
return abs(next_index), True
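    # Worked example: with num_stations = 3, a b-direction train at index 2
    # taking step_size 2 gets next_index = 4, which is past the terminal, so
    # it reflects to (3 - 4 % 3, False) = (2, False): the train reverses into
    # the a direction.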
def __str__(self):
return "\n".join(str(station) for station in self.stations)
def __repr__(self):
return str(self)
| [
"[email protected]"
] | |
1321ca6e8b06bce640ef7a93d92acd4a9d7814d7 | 9f0f5816b9d810c9ce01c56588024e1c804809fe | /study/day9/9-1.py | 1061d19b08630454e68f6d7886f13dbd06d09571 | [] | no_license | parkwisdom/Python-Study-step1 | bf8cc8c5f89bfb9ccbb395a3827e23d4f0d6ae9a | bae2f5653c5a0d1eac1d4b89476ece7e0802d33b | refs/heads/master | 2020-04-03T13:49:58.990930 | 2018-10-30T00:37:29 | 2018-10-30T00:37:29 | 155,300,210 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # x=dict(zip(range(1,11),[2**i for i in range(1,11)]))
# print(x)
# print(len(x))
#
# test={'gildong':{'w':70,'h':170,'b':'a'},'donggil':{'w':75,'h':160,'b':'ab'},'gildo':{'w':72,'h':150,'b':'o'}}
# print(test)
# print(test['gildong'])
# print(test['gildong']['h'])
#
# test['gildong']['h']+=10
# print(test['gildong'])
#
# test['gildong']['h']=200
# print(test['gildong'])
# import sys
# print(sys.maxsize)
category_dict = {'광고':5,'중요':5}  # Korean keys: '광고' = "ads", '중요' = "important"
print(category_dict.values()) | [
"[email protected]"
] | |
f68d9f01c4273c795a53da9a7898f803f20f55ef | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/ICalGLib/ErrorEnum.py | cb339c0d2892d8b1eb6b268094f725e78eced2eb | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 14,208 | py | # encoding: utf-8
# module gi.repository.ICalGLib
# from /usr/lib64/girepository-1.0/ICalGLib-3.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
class ErrorEnum(__gobject.GEnum):
# no doc
def as_integer_ratio(self): # real signature unknown; restored from __doc__
"""
Return integer ratio.
Return a pair of integers, whose ratio is exactly equal to the original int
and with a positive denominator.
>>> (10).as_integer_ratio()
(10, 1)
>>> (-10).as_integer_ratio()
(-10, 1)
>>> (0).as_integer_ratio()
(0, 1)
"""
pass
def bit_length(self): # real signature unknown; restored from __doc__
"""
Number of bits necessary to represent self in binary.
>>> bin(37)
'0b100101'
>>> (37).bit_length()
6
"""
pass
def conjugate(self, *args, **kwargs): # real signature unknown
""" Returns self, the complex conjugate of any int. """
pass
def from_bytes(self, *args, **kwargs): # real signature unknown
"""
Return the integer represented by the given array of bytes.
bytes
Holds the array of bytes to convert. The argument must either
support the buffer protocol or be an iterable object producing bytes.
Bytes and bytearray are examples of built-in objects that support the
buffer protocol.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Indicates whether two's complement is used to represent the integer.
"""
pass
def to_bytes(self, *args, **kwargs): # real signature unknown
"""
Return an array of bytes representing an integer.
length
Length of bytes object to use. An OverflowError is raised if the
integer is not representable with the given number of bytes.
byteorder
The byte order used to represent the integer. If byteorder is 'big',
the most significant byte is at the beginning of the byte array. If
byteorder is 'little', the most significant byte is at the end of the
byte array. To request the native byte order of the host system, use
`sys.byteorder' as the byte order value.
signed
Determines whether two's complement is used to represent the integer.
If signed is False and a negative integer is given, an OverflowError
is raised.
"""
pass
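    # For example (standard int behaviour):
    #   (1024).to_bytes(2, 'big')          -> b'\x04\x00'
    #   int.from_bytes(b'\x04\x00', 'big') -> 1024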
def __abs__(self, *args, **kwargs): # real signature unknown
""" abs(self) """
pass
def __add__(self, *args, **kwargs): # real signature unknown
""" Return self+value. """
pass
def __and__(self, *args, **kwargs): # real signature unknown
""" Return self&value. """
pass
def __bool__(self, *args, **kwargs): # real signature unknown
""" self != 0 """
pass
def __ceil__(self, *args, **kwargs): # real signature unknown
""" Ceiling of an Integral returns itself. """
pass
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __divmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(self, value). """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __float__(self, *args, **kwargs): # real signature unknown
""" float(self) """
pass
def __floordiv__(self, *args, **kwargs): # real signature unknown
""" Return self//value. """
pass
def __floor__(self, *args, **kwargs): # real signature unknown
""" Flooring an Integral returns itself. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getnewargs__(self, *args, **kwargs): # real signature unknown
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __index__(self, *args, **kwargs): # real signature unknown
""" Return self converted to an integer, if self is suitable for use as an index into a list. """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __int__(self, *args, **kwargs): # real signature unknown
""" int(self) """
pass
def __invert__(self, *args, **kwargs): # real signature unknown
""" ~self """
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lshift__(self, *args, **kwargs): # real signature unknown
""" Return self<<value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __mod__(self, *args, **kwargs): # real signature unknown
""" Return self%value. """
pass
def __mul__(self, *args, **kwargs): # real signature unknown
""" Return self*value. """
pass
def __neg__(self, *args, **kwargs): # real signature unknown
""" -self """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __or__(self, *args, **kwargs): # real signature unknown
""" Return self|value. """
pass
def __pos__(self, *args, **kwargs): # real signature unknown
""" +self """
pass
def __pow__(self, *args, **kwargs): # real signature unknown
""" Return pow(self, value, mod). """
pass
def __radd__(self, *args, **kwargs): # real signature unknown
""" Return value+self. """
pass
def __rand__(self, *args, **kwargs): # real signature unknown
""" Return value&self. """
pass
def __rdivmod__(self, *args, **kwargs): # real signature unknown
""" Return divmod(value, self). """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __rfloordiv__(self, *args, **kwargs): # real signature unknown
""" Return value//self. """
pass
def __rlshift__(self, *args, **kwargs): # real signature unknown
""" Return value<<self. """
pass
def __rmod__(self, *args, **kwargs): # real signature unknown
""" Return value%self. """
pass
def __rmul__(self, *args, **kwargs): # real signature unknown
""" Return value*self. """
pass
def __ror__(self, *args, **kwargs): # real signature unknown
""" Return value|self. """
pass
def __round__(self, *args, **kwargs): # real signature unknown
"""
Rounding an Integral returns itself.
Rounding with an ndigits argument also returns an integer.
"""
pass
def __rpow__(self, *args, **kwargs): # real signature unknown
""" Return pow(value, self, mod). """
pass
def __rrshift__(self, *args, **kwargs): # real signature unknown
""" Return value>>self. """
pass
def __rshift__(self, *args, **kwargs): # real signature unknown
""" Return self>>value. """
pass
def __rsub__(self, *args, **kwargs): # real signature unknown
""" Return value-self. """
pass
def __rtruediv__(self, *args, **kwargs): # real signature unknown
""" Return value/self. """
pass
def __rxor__(self, *args, **kwargs): # real signature unknown
""" Return value^self. """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Returns size in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __sub__(self, *args, **kwargs): # real signature unknown
""" Return self-value. """
pass
def __truediv__(self, *args, **kwargs): # real signature unknown
""" Return self/value. """
pass
def __trunc__(self, *args, **kwargs): # real signature unknown
""" Truncating an Integral returns itself. """
pass
def __xor__(self, *args, **kwargs): # real signature unknown
""" Return self^value. """
pass
denominator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the denominator of a rational number in lowest terms"""
imag = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the imaginary part of a complex number"""
numerator = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the numerator of a rational number in lowest terms"""
real = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""the real part of a complex number"""
value_name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
value_nick = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
ALLOCATION_ERROR = 3
BADARG_ERROR = 1
FILE_ERROR = 7
INTERNAL_ERROR = 6
MALFORMEDDATA_ERROR = 4
NEWFAILED_ERROR = 2
NO_ERROR = 0
PARSE_ERROR = 5
UNIMPLEMENTED_ERROR = 9
UNKNOWN_ERROR = 10
USAGE_ERROR = 8
__class__ = type
__dict__ = None # (!) real value is "mappingproxy({'__module__': 'gi.repository.ICalGLib', '__dict__': <attribute '__dict__' of 'ErrorEnum' objects>, '__doc__': None, '__gtype__': <GType PyICalGLibErrorEnum (94403187979232)>, '__enum_values__': {0: <enum I_CAL_NO_ERROR of type ICalGLib.ErrorEnum>, 1: <enum I_CAL_BADARG_ERROR of type ICalGLib.ErrorEnum>, 2: <enum I_CAL_NEWFAILED_ERROR of type ICalGLib.ErrorEnum>, 3: <enum I_CAL_ALLOCATION_ERROR of type ICalGLib.ErrorEnum>, 4: <enum I_CAL_MALFORMEDDATA_ERROR of type ICalGLib.ErrorEnum>, 5: <enum I_CAL_PARSE_ERROR of type ICalGLib.ErrorEnum>, 6: <enum I_CAL_INTERNAL_ERROR of type ICalGLib.ErrorEnum>, 7: <enum I_CAL_FILE_ERROR of type ICalGLib.ErrorEnum>, 8: <enum I_CAL_USAGE_ERROR of type ICalGLib.ErrorEnum>, 9: <enum I_CAL_UNIMPLEMENTED_ERROR of type ICalGLib.ErrorEnum>, 10: <enum I_CAL_UNKNOWN_ERROR of type ICalGLib.ErrorEnum>}, '__info__': gi.EnumInfo(ErrorEnum), 'NO_ERROR': <enum I_CAL_NO_ERROR of type ICalGLib.ErrorEnum>, 'BADARG_ERROR': <enum I_CAL_BADARG_ERROR of type ICalGLib.ErrorEnum>, 'NEWFAILED_ERROR': <enum I_CAL_NEWFAILED_ERROR of type ICalGLib.ErrorEnum>, 'ALLOCATION_ERROR': <enum I_CAL_ALLOCATION_ERROR of type ICalGLib.ErrorEnum>, 'MALFORMEDDATA_ERROR': <enum I_CAL_MALFORMEDDATA_ERROR of type ICalGLib.ErrorEnum>, 'PARSE_ERROR': <enum I_CAL_PARSE_ERROR of type ICalGLib.ErrorEnum>, 'INTERNAL_ERROR': <enum I_CAL_INTERNAL_ERROR of type ICalGLib.ErrorEnum>, 'FILE_ERROR': <enum I_CAL_FILE_ERROR of type ICalGLib.ErrorEnum>, 'USAGE_ERROR': <enum I_CAL_USAGE_ERROR of type ICalGLib.ErrorEnum>, 'UNIMPLEMENTED_ERROR': <enum I_CAL_UNIMPLEMENTED_ERROR of type ICalGLib.ErrorEnum>, 'UNKNOWN_ERROR': <enum I_CAL_UNKNOWN_ERROR of type ICalGLib.ErrorEnum>})"
__enum_values__ = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
}
__gtype__ = None # (!) real value is '<GType PyICalGLibErrorEnum (94403187979232)>'
__info__ = gi.EnumInfo(ErrorEnum)
| [
"[email protected]"
] | |
8111196f57b743d2a02a94455bf1cf97b16635dc | 4b7e282fe480415f5d52c0fc0429f144156190fe | /google/ads/googleads/v8/services/services/conversion_adjustment_upload_service/client.py | bc6631bba483e25a44732ac56157c21f43e72e1b | [
"Apache-2.0"
] | permissive | Z2Xsoft/google-ads-python | c4750357bb19da91bb3b6bf2fa84bef9d2df36d3 | 1779d52a0446c8afb2437b0a9e103dcb849f5590 | refs/heads/main | 2023-08-18T15:22:17.840364 | 2021-09-26T04:08:53 | 2021-09-26T04:08:53 | 410,444,398 | 0 | 0 | Apache-2.0 | 2021-09-26T04:08:53 | 2021-09-26T03:55:38 | null | UTF-8 | Python | false | false | 19,751 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.ads.googleads.v8.services.types import (
conversion_adjustment_upload_service,
)
from google.rpc import status_pb2 # type: ignore
from .transports.base import (
ConversionAdjustmentUploadServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import ConversionAdjustmentUploadServiceGrpcTransport
class ConversionAdjustmentUploadServiceClientMeta(type):
"""Metaclass for the ConversionAdjustmentUploadService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[ConversionAdjustmentUploadServiceTransport]]
_transport_registry["grpc"] = ConversionAdjustmentUploadServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[ConversionAdjustmentUploadServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class ConversionAdjustmentUploadServiceClient(
metaclass=ConversionAdjustmentUploadServiceClientMeta
):
"""Service to upload conversion adjustments."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversionAdjustmentUploadServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ConversionAdjustmentUploadServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> ConversionAdjustmentUploadServiceTransport:
"""Return the transport used by the client instance.
Returns:
ConversionAdjustmentUploadServiceTransport: The transport used by the client instance.
"""
return self._transport
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Return a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Return a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[
str, ConversionAdjustmentUploadServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the conversion adjustment upload service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ConversionAdjustmentUploadServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
)
)
ssl_credentials = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
import grpc # type: ignore
cert, key = client_options.client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
is_mtls = True
else:
creds = SslCredentials()
is_mtls = creds.is_mtls
ssl_credentials = creds.ssl_credentials if is_mtls else None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, ConversionAdjustmentUploadServiceTransport):
# transport is a ConversionAdjustmentUploadServiceTransport instance.
if credentials:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
self._transport = transport
elif isinstance(transport, str):
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials, host=self.DEFAULT_ENDPOINT
)
else:
self._transport = ConversionAdjustmentUploadServiceGrpcTransport(
credentials=credentials,
host=api_endpoint,
ssl_channel_credentials=ssl_credentials,
client_info=client_info,
)
def upload_conversion_adjustments(
self,
request: conversion_adjustment_upload_service.UploadConversionAdjustmentsRequest = None,
*,
customer_id: str = None,
conversion_adjustments: Sequence[
conversion_adjustment_upload_service.ConversionAdjustment
] = None,
partial_failure: bool = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> conversion_adjustment_upload_service.UploadConversionAdjustmentsResponse:
r"""Processes the given conversion adjustments.
List of thrown errors: `AuthenticationError <>`__
`AuthorizationError <>`__ `HeaderError <>`__
`InternalError <>`__ `PartialFailureError <>`__
`QuotaError <>`__ `RequestError <>`__
Args:
request (:class:`google.ads.googleads.v8.services.types.UploadConversionAdjustmentsRequest`):
The request object. Request message for
[ConversionAdjustmentUploadService.UploadConversionAdjustments][google.ads.googleads.v8.services.ConversionAdjustmentUploadService.UploadConversionAdjustments].
customer_id (:class:`str`):
Required. The ID of the customer
performing the upload.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
conversion_adjustments (:class:`Sequence[google.ads.googleads.v8.services.types.ConversionAdjustment]`):
Required. The conversion adjustments
that are being uploaded.
This corresponds to the ``conversion_adjustments`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
partial_failure (:class:`bool`):
Required. If true, successful
operations will be carried out and
invalid operations will return errors.
If false, all operations will be carried
out in one transaction if and only if
they are all valid. This should always
be set to true.
See
https://developers.google.com/google-
ads/api/docs/best-practices/partial-
failures for more information about
partial failure.
This corresponds to the ``partial_failure`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v8.services.types.UploadConversionAdjustmentsResponse:
Response message for
[ConversionAdjustmentUploadService.UploadConversionAdjustments][google.ads.googleads.v8.services.ConversionAdjustmentUploadService.UploadConversionAdjustments].
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
if request is not None and any(
[customer_id, conversion_adjustments, partial_failure]
):
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a conversion_adjustment_upload_service.UploadConversionAdjustmentsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
conversion_adjustment_upload_service.UploadConversionAdjustmentsRequest,
):
request = conversion_adjustment_upload_service.UploadConversionAdjustmentsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if conversion_adjustments is not None:
request.conversion_adjustments = conversion_adjustments
if partial_failure is not None:
request.partial_failure = partial_failure
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.upload_conversion_adjustments
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
# Done; return the response.
return response
__all__ = ("ConversionAdjustmentUploadServiceClient",)
| [
"[email protected]"
] | |
d4f12ad333b598e5780635248dd3309ec65023be | 8f48d12b88048e424ebb0d72ca6dfab5cf12ae0f | /0001_0599/238.py | 15a9a2eef432e94fcbe8a8a74aab146a918dbe24 | [] | no_license | renjieliu/leetcode | e1caf13c18a8107ed9252588b339fb76bcb1b246 | 4668b64fcb9320b6c316d8608fc61911ce43b6c7 | refs/heads/master | 2023-03-18T18:16:06.187741 | 2023-03-14T20:31:59 | 2023-03-14T20:31:59 | 128,823,819 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,217 | py | class Solution:
def productExceptSelf(self, nums: 'List[int]') -> 'List[int]':
zeroloc = -1 # record the location of 0.
total = 1
for i, n in enumerate(nums):
if n == 0:
if zeroloc == -1:
zeroloc = i
else:
return [0 for _ in nums ] # if 2 zeroes, return all 0
else:
total *= n
output = []
        if zeroloc != -1: # exactly one zero: every slot is 0 except the zero's slot, which gets the product of the others
output = [0 for _ in nums]
output[zeroloc] = total
else:
for i, n in enumerate(nums):
output.append(total//n)
return output
# previous approach
# def productExceptSelf(nums: 'List[int]'):
# #from left to right, make the first element as 1.
# ini =1
# output =[]
# for i in range(len(nums)):
# output.append(ini)
# ini*=nums[i]
# #from right to left
# ini = 1
# for i in range(len(nums) - 1, -1, -1):
# output[i] = output[i] * ini
# ini *= nums[i]
# return output
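# Worked example of the prefix/suffix passes above for nums = [1, 2, 3, 4]:
#   left-to-right pass:  output = [1, 1, 2, 6]    (product of everything before i)
#   right-to-left pass:  output = [24, 12, 8, 6]  (fold in everything after i)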
# print(productExceptSelf([1,2,3,4])) | [
"[email protected]"
] | |
7d3d9f6ca55f346b1a58b8b0bed7311a7c5743e0 | 98590747113ca3022c67c8bc6332b2bf48d7073e | /maximum_depth_of_binary_tree.py | 7ce50d022a3ba784b2b41b94dbb425e71483882f | [] | no_license | buxizhizhoum/leetcode | a54291519a23fe82e9f9620e5a2266833696f005 | cf4235170db3629b65790fd0855a8a72ac5886f7 | refs/heads/master | 2022-06-04T02:54:26.381077 | 2022-04-01T06:58:19 | 2022-04-01T06:58:19 | 116,791,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,030 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Given a binary tree, find its maximum depth.
The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
Note: A leaf is a node with no children.
Example:
Given binary tree [3,9,20,null,null,15,7],
3
/ \
9 20
/ \
15 7
return its depth = 3.
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def maxDepth(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if root is None:
return 0
height = self.height(root)
return height + 1
@classmethod
def height(cls, node):
if node is None:
return -1
left = cls.height(node.left)
right = cls.height(node.right)
        # the height of the tree grows from the leaf nodes up to the root; if one
        # subtree is missing, the height should not be taken from the missing side.
        # The lines below handle a missing subtree: that is necessary when
        # computing the *min* depth, but not for the max depth, since a node
        # missing a subtree can never contribute the maximum height
# if node.left is None:
# height = right + 1
# return height
# if node.right is None:
# height = left + 1
# return height
height = max(left + 1, right + 1)
return height
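# Illustrative companion (not part of the original solution): a min-depth
# variant showing why the missing-subtree guard discussed above matters there;
# it assumes the TreeNode class defined earlier in this file.
def min_depth(node):
    if node is None:
        return 0
    if node.left is None:  # count only the existing subtree, otherwise a
        return min_depth(node.right) + 1
    if node.right is None:  # one-child node would wrongly look like a leaf
        return min_depth(node.left) + 1
    return min(min_depth(node.left), min_depth(node.right)) + 1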
if __name__ == "__main__":
r = TreeNode(6)
r.left = TreeNode(5)
r.right = TreeNode(9)
# r.left.left = TreeNode(3)
# r.left.left.left = TreeNode(3)
# r.left.right = TreeNode(4)
r.right.left = TreeNode(7)
r.right.right = TreeNode(11)
r.right.right.right = TreeNode(13)
s = Solution()
res = s.maxDepth(r)
print(res)
| [
"[email protected]"
] | |
461d2c40bb4f8a3b5ab23a90655470e60eda5a32 | f663f7baa042c6d5e71ceb22ce3d304d5bcbfd15 | /archived_lectures/Fall_2019/old_lectures/2018/Week_5/test_util.py | cbd0d779dd67ed6a702944cc723c2e001dbf44e6 | [
"MIT"
] | permissive | ModelEngineering/advancing-biomedical-models | 2dcf0fb35629a4cce2b30b5c18490c9837875b64 | 7469576d484d894febb161c3cb48b723dfbcaf1b | refs/heads/master | 2022-11-12T10:24:23.407006 | 2022-10-19T08:33:29 | 2022-10-19T08:33:29 | 132,825,741 | 2 | 9 | null | null | null | null | UTF-8 | Python | false | false | 1,974 | py | """
Tests for simulation utility functions.
To run: python test_util.py
"""
import util
import lmfit
import numpy as np
import unittest
ka = 0.4
v0 = 10
kb = 0.32
kc = 0.4
PARAMETERS = lmfit.Parameters()
NAMES = ["ka", "v0", "kb", "kc"]
for name in NAMES:
if name[0] == "v":
maxval = 20
else:
maxval = 2
PARAMETERS.add(name, value=eval(name), min=0, max=maxval)
PARAMETERS_COLLECTION = [PARAMETERS for _ in range(10)]
IGNORE_TEST = True
class TestFunctions(unittest.TestCase):
def testFoldGenerator(self):
NUM_FOLDS = 4
generator = util.foldGenerator(10, NUM_FOLDS)
size = len([g for g in generator])
self.assertEqual(size, NUM_FOLDS)
def testAggregateParameters(self):
parameters = util.aggregateParameters(PARAMETERS_COLLECTION)
self.assertTrue(isinstance(parameters, lmfit.Parameters))
for name in parameters:
self.assertTrue(np.isclose(
PARAMETERS.get(name), parameters.get(name)
))
def testMakeParametersStatistics(self):
parameters = util.aggregateParameters(PARAMETERS_COLLECTION)
result = util.makeParametersStatistics(PARAMETERS_COLLECTION)
for name in result.keys():
self.assertEqual(len(result[name]), 2)
self.assertTrue(np.isclose(result[name][0], parameters.get(name)))
def testPlotFit(self):
# Smoke test
util.plotFit(range(10), range(10), is_plot=False)
def testGenerateBootstrapData(self):
NUM = 1000
STD = 1.0
y_fit = np.array(range(NUM))
y_obs = y_fit + np.random.normal(0, STD, NUM)
for _ in range(10):
y_new = util.generateBootstrapData(y_obs, y_fit)
self.assertEqual(len(y_new), len(y_fit))
            # assertTrue(value, msg) never compared the two numbers; check that
            # the residual std is close to STD instead (delta chosen loosely here)
            self.assertAlmostEqual(np.std(y_new - y_fit), STD, delta=0.2)
def testGetParameterData(self):
result = util.getParameterData(PARAMETERS_COLLECTION)
for name in NAMES:
self.assertTrue(name in result)
self.assertTrue(np.isclose(np.std(result[name]), 0.0))
if __name__ == "__main__":
unittest.main()
| [
"[email protected]"
] | |
d511b1176c1de153e70e4026276b2a5fe5ff9283 | f2d6b7a38a2a36ec57902f5a5704c2d3f9f444cd | /ROS/build/simple_topic_publisher/catkin_generated/pkg.installspace.context.pc.py | 48352a6c8885e6e25aa470d909f60132a9935240 | [] | no_license | gbattra/RobotIgniteAcademy | 3c4a9b531cd7a4b2582a9aaf30899e363c771c68 | 4fe009333a49c813e2022f1b9f478410d2fe665b | refs/heads/master | 2022-12-08T09:41:27.911885 | 2020-08-25T01:37:45 | 2020-08-25T01:37:45 | 290,082,460 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "simple_topic_publisher"
PROJECT_SPACE_DIR = "/home/user/catkin_ws/install"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
] | |
3bfa5d1ece68242a859e084e744aa07bd4cd031b | c7027edceeae907ce7d21112336e84f101eeb89b | /airflow/providers/google/cloud/transfers/gcs_to_sftp.py | 964b365fc40020f9d33d0f46fd572479e6138a6a | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | jao6693/airflow | 0a8027ce33e20ee8f6032facb1b8ab453c2d20d4 | 269b608246b015c55e6cae4ed0f50b1e2bb0fa95 | refs/heads/main | 2023-01-30T18:53:23.431745 | 2022-11-05T14:59:27 | 2022-11-05T14:59:27 | 320,338,180 | 0 | 0 | Apache-2.0 | 2020-12-10T17:08:36 | 2020-12-10T17:08:35 | null | UTF-8 | Python | false | false | 8,437 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module contains Google Cloud Storage to SFTP operator."""
import os
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Optional, Sequence, Union
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.providers.sftp.hooks.sftp import SFTPHook
WILDCARD = "*"
if TYPE_CHECKING:
from airflow.utils.context import Context
class GCSToSFTPOperator(BaseOperator):
"""
Transfer files from a Google Cloud Storage bucket to SFTP server.
**Example**: ::
with models.DAG(
"example_gcs_to_sftp",
start_date=datetime(2020, 6, 19),
schedule=None,
) as dag:
# downloads file to /tmp/sftp/folder/subfolder/file.txt
copy_file_from_gcs_to_sftp = GCSToSFTPOperator(
task_id="file-copy-gsc-to-sftp",
source_bucket="test-gcs-sftp-bucket-name",
source_object="folder/subfolder/file.txt",
destination_path="/tmp/sftp",
)
# moves file to /tmp/data.txt
move_file_from_gcs_to_sftp = GCSToSFTPOperator(
task_id="file-move-gsc-to-sftp",
source_bucket="test-gcs-sftp-bucket-name",
source_object="folder/subfolder/data.txt",
destination_path="/tmp",
move_object=True,
keep_directory_structure=False,
)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GCSToSFTPOperator`
:param source_bucket: The source Google Cloud Storage bucket where the
object is. (templated)
:param source_object: The source name of the object to copy in the Google cloud
storage bucket. (templated)
You can use only one wildcard for objects (filenames) within your
bucket. The wildcard can appear inside the object name or at the
end of the object name. Appending a wildcard to the bucket name is
unsupported.
:param destination_path: The sftp remote path. This is the specified directory path for
uploading to the SFTP server.
:param keep_directory_structure: (Optional) When set to False the path of the file
on the bucket is recreated within path passed in destination_path.
:param move_object: When move object is True, the object is moved instead
of copied to the new location. This is the equivalent of a mv command
as opposed to a cp command.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param sftp_conn_id: The sftp connection id. The name or identifier for
establishing a connection to the SFTP server.
:param delegate_to: The account to impersonate using domain-wide delegation of authority,
if any. For this to work, the service account making the request must have
domain-wide delegation enabled.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"source_bucket",
"source_object",
"destination_path",
"impersonation_chain",
)
ui_color = "#f0eee4"
def __init__(
self,
*,
source_bucket: str,
source_object: str,
destination_path: str,
keep_directory_structure: bool = True,
move_object: bool = False,
gcp_conn_id: str = "google_cloud_default",
sftp_conn_id: str = "ssh_default",
delegate_to: Optional[str] = None,
impersonation_chain: Optional[Union[str, Sequence[str]]] = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.source_bucket = source_bucket
self.source_object = source_object
self.destination_path = destination_path
self.keep_directory_structure = keep_directory_structure
self.move_object = move_object
self.gcp_conn_id = gcp_conn_id
self.sftp_conn_id = sftp_conn_id
self.delegate_to = delegate_to
self.impersonation_chain = impersonation_chain
self.sftp_dirs = None
def execute(self, context: 'Context'):
gcs_hook = GCSHook(
gcp_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to,
impersonation_chain=self.impersonation_chain,
)
sftp_hook = SFTPHook(self.sftp_conn_id)
if WILDCARD in self.source_object:
total_wildcards = self.source_object.count(WILDCARD)
if total_wildcards > 1:
raise AirflowException(
"Only one wildcard '*' is allowed in source_object parameter. "
f"Found {total_wildcards} in {self.source_object}."
)
prefix, delimiter = self.source_object.split(WILDCARD, 1)
prefix_dirname = os.path.dirname(prefix)
objects = gcs_hook.list(self.source_bucket, prefix=prefix, delimiter=delimiter)
for source_object in objects:
destination_path = self._resolve_destination_path(source_object, prefix=prefix_dirname)
self._copy_single_object(gcs_hook, sftp_hook, source_object, destination_path)
self.log.info("Done. Uploaded '%d' files to %s", len(objects), self.destination_path)
else:
destination_path = self._resolve_destination_path(self.source_object)
self._copy_single_object(gcs_hook, sftp_hook, self.source_object, destination_path)
self.log.info("Done. Uploaded '%s' file to %s", self.source_object, destination_path)
def _resolve_destination_path(self, source_object: str, prefix: Optional[str] = None) -> str:
if not self.keep_directory_structure:
if prefix:
source_object = os.path.relpath(source_object, start=prefix)
else:
source_object = os.path.basename(source_object)
return os.path.join(self.destination_path, source_object)
def _copy_single_object(
self,
gcs_hook: GCSHook,
sftp_hook: SFTPHook,
source_object: str,
destination_path: str,
) -> None:
"""Helper function to copy single object."""
self.log.info(
"Executing copy of gs://%s/%s to %s",
self.source_bucket,
source_object,
destination_path,
)
dir_path = os.path.dirname(destination_path)
sftp_hook.create_directory(dir_path)
with NamedTemporaryFile("w") as tmp:
gcs_hook.download(
bucket_name=self.source_bucket,
object_name=source_object,
filename=tmp.name,
)
sftp_hook.store_file(destination_path, tmp.name)
if self.move_object:
self.log.info("Executing delete of gs://%s/%s", self.source_bucket, source_object)
gcs_hook.delete(self.source_bucket, source_object)
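# Illustrative note (not part of the operator): how _resolve_destination_path
# behaves, assuming destination_path="/tmp/sftp" and the wildcard source
# "folder/*" matching "folder/subfolder/file.txt":
#   keep_directory_structure=True  -> /tmp/sftp/folder/subfolder/file.txt
#   keep_directory_structure=False -> /tmp/sftp/subfolder/file.txt
#     (the object path is taken relative to the wildcard prefix "folder")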
| [
"[email protected]"
] | |
0856a12681340f456cc6cc7d768fd51a3f9de6b1 | 0d76ba0da5446f20e500b7e31f53821b14cb49d8 | /Rosalind/python/revc.py | 5f9a8f42871395c24b14702d4eb8085fa05e2fe5 | [] | no_license | filwaitman/playground | 948aa687be06d456c86b65ee3ab5fb9792149459 | dfdfab9002bff3a04f37e0c161363a864cd30f3e | refs/heads/master | 2021-01-12T12:59:49.057832 | 2020-01-26T18:51:02 | 2020-01-26T18:51:02 | 68,865,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | # -*- coding: utf-8 -*-
# http://rosalind.info/problems/revc/
import sys
def reverse_complement(string):
reversed_ = string[::-1]
reversed_ = reversed_.replace('A', 'X').replace('T', 'A').replace('X', 'T')
reversed_ = reversed_.replace('C', 'X').replace('G', 'C').replace('X', 'G')
return reversed_
if __name__ == '__main__':
string = sys.argv[1]
    print(reverse_complement(string))  # print the result; the original call discarded it
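# Equivalent Python 3 one-liner using str.maketrans, shown for comparison:
#   def reverse_complement(string):
#       return string[::-1].translate(str.maketrans('ATCG', 'TAGC'))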
| [
"[email protected]"
] | |
ca5283b55effaf4bcd77fe732ae29fc5c471bee6 | 7233716fbf9fff94240d14770b3fc3f3ada10d9b | /devel/lib/python2.7/dist-packages/ur_msgs/msg/_RobotStateRTMsg.py | deb51da650ecaf1eab4673427a0d7e51809f05b6 | [] | no_license | shashankseth01/E-yantra | 58d42dce90667ca37f31f2cf111ee98c39468617 | 23432e058fce7733bd1a8399fd6edc20967fa6a3 | refs/heads/main | 2023-02-04T00:36:57.230996 | 2020-12-21T09:55:23 | 2020-12-21T09:55:23 | 316,716,460 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | /home/shashank/ws_task3/devel/.private/ur_msgs/lib/python2.7/dist-packages/ur_msgs/msg/_RobotStateRTMsg.py | [
"[email protected]"
] | |
acacce0291355a9534ff0563af6514333051fb77 | b6aa9768dbac327943e0220df1c56ce38adc6de1 | /91_decode-ways.py | 039d1992172f104ffa65150443dd06340ea20d3c | [] | no_license | Khrystynka/LeetCodeProblems | f86e4c1e46f70f874924de137ec5efb2f2518766 | 917bd000c2a055dfa2633440a61ca4ae2b665fe3 | refs/heads/master | 2021-03-17T00:51:10.102494 | 2020-09-28T06:31:03 | 2020-09-28T06:31:03 | 246,954,162 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | # Problem Title: Decode Ways
class Solution(object):
def numDecodings(self, s):
"""
:type s: str
:rtype: int
"""
        lst = list(map(int, s))  # wrap in list() so len() and indexing also work on Python 3
p_1 = 1
res = 1
for i in range(len(lst)):
p_2 = p_1
p_1 = res
if lst[i] == 0:
if i == 0 or lst[i-1] > 2 or lst[i-1] == 0:
return 0
else:
res = p_2
else:
res = p_1
if i != 0 and lst[i] > 0 and (lst[i-1] == 1 or (lst[i-1] == 2 and lst[i] <= 6)):
res += p_2
return res
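# Worked example for s = "226" (res = ways up to i, p_2 = ways up to i-2):
#   i=0 '2': res = 1          ("2")
#   i=1 '2': res = 1 + 1 = 2  ("2|2", "22")
#   i=2 '6': res = 2 + 1 = 3  ("2|2|6", "22|6", "2|26")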
| [
"[email protected]"
] | |
523230368fdadb0448796ee631c05daf2fe86381 | 8d0be6bdb480caa895b6cea1884b40c51cf25156 | /experiment/python/convert-json-and-yaml.py | ed765819326d33d274bdfd85e90245538e9aa335 | [
"MIT"
] | permissive | atb00ker/scripts-lab | 8642138574ac3403278c004b2c67f0200b29a83d | 71a5cc9c7f301c274798686db4a227e84b65926a | refs/heads/master | 2021-09-09T22:15:36.133142 | 2021-09-04T18:30:31 | 2021-09-04T18:30:54 | 163,108,084 | 2 | 0 | MIT | 2019-10-08T19:44:00 | 2018-12-25T19:54:54 | Python | UTF-8 | Python | false | false | 318 | py | import json
import yaml
with open('input.json') as js:
data = json.load(js)
with open('output.yaml', 'w') as yml:
yaml.dump(data, yml, default_flow_style=False, allow_unicode=True)
with open('input.yml') as yml:
    data = yaml.safe_load(yml)  # safe_load avoids constructing arbitrary objects; bare load() is deprecated without a Loader
with open('output.json', 'w') as js:
js.write(json.dumps(data))
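# Round-trip note: json.dump(data, js, ensure_ascii=False, indent=2) would write
# straight to the file handle and keep non-ASCII text readable; json.dumps as
# used above builds the same JSON as a string first.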
| [
"[email protected]"
] | |
d33a22fa1084f81223eba1e0e845c919e416ab5f | 713f9168a7ba68740bb9b4ea6994e853a56d2d5c | /2022-05-30-python/day1/save_wikipedia.py | 3e83f5e8c22cdae5d35f0f32144e67b89203c3cc | [] | no_license | marko-knoebl/courses-code | ba7723c9a61861b037422670b98276fed41060e2 | faeaa31c9a156a02e4e9169bc16f229cdaee085d | refs/heads/master | 2022-12-29T02:13:12.653745 | 2022-12-16T09:21:18 | 2022-12-16T09:21:18 | 142,756,698 | 16 | 10 | null | 2022-03-08T22:30:11 | 2018-07-29T11:51:04 | Jupyter Notebook | UTF-8 | Python | false | false | 270 | py | from urllib.request import urlopen
# make a HTTP request
req = urlopen("https://en.wikipedia.org")
# read content as utf-8 string
content = req.read().decode("utf-8")
# save to file
file = open("wikipedia.html", "w", encoding="utf-8")
file.write(content)
file.close()
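# The same write step with a context manager, so the file is closed even if an
# exception occurs:
#   with open("wikipedia.html", "w", encoding="utf-8") as f:
#       f.write(content)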
| [
"[email protected]"
] | |
17e90b8c30eaba457e03faf76fca9908aaf1b8b0 | 32cf9c3099c36a46804e393dd1491a8954f50263 | /2019.04.11 - 프로필, 인스타 생성/PROJECT05_10/PROJECT05/boards/migrations/0001_initial.py | 871a328c5df4bde811a7395a602cfaa923694ed8 | [] | no_license | ash92kr/s_code | ce3bda6a403600892750e181dca5ed8c4caebcb1 | 92eace551d132b91ee91db6c0afd38b93f9b647b | refs/heads/master | 2020-04-12T00:27:07.043091 | 2019-05-21T08:17:39 | 2019-05-21T08:17:39 | 162,200,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | # Generated by Django 2.1.7 on 2019-03-21 11:37
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Board',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=10)),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
]
| [
"[email protected]"
] | |
9150360bc6801d8550fa7251315721013196303e | 34d8cbc0471be7e051466fbd14a0fc7730bff133 | /peer_recieve.py | e8884c30547531ab674cc7f82cbb8b65bbe95427 | [] | no_license | jk-programs/basiccoin | 59e35c1fb547add8d2fe1156fbc7fc348a1d8f0f | 6fd285525febd82733b7e7ef604428c3a8fe3ff4 | refs/heads/master | 2021-01-18T18:45:19.900004 | 2014-09-04T21:01:31 | 2014-09-04T21:01:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,762 | py | """When a peer talks to us, this is how we generate a response. This is the external API.
"""
import networking, custom, tools, blockchain, time
def security_check(dic):
if 'version' not in dic or dic['version'] != custom.version:
return {'bool': False, 'error': 'version'}
else:
#we could add security features here.
return {'bool': True, 'newdic': dic}
def blockCount(dic, DB):
length = DB['length']
if length >= 0:
return {'length': length,
'diffLength': DB['diffLength']}
else:
return {'length': -1, 'diffLength': '0'}
def rangeRequest(dic, DB):
ran = dic['range']
out = []
counter = 0
while (len(tools.package(out)) < custom.max_download
and ran[0] + counter <= ran[1]):
block = tools.db_get(ran[0] + counter, DB)
if 'length' in block:
out.append(block)
counter += 1
return out
def txs(dic, DB):
return DB['txs']
def pushtx(dic, DB):
DB['suggested_txs'].put(dic['tx'])
return 'success'
def pushblock(dic, DB):
if 'blocks' in dic:
for block in dic['blocks']:
DB['suggested_blocks'].put([block, dic['peer']])
else:
DB['suggested_blocks'].put([dic['block'], dic['peer']])
return 'success'
def main(dic, DB):
funcs = {'blockCount': blockCount, 'rangeRequest': rangeRequest,
'txs': txs, 'pushtx': pushtx, 'pushblock': pushblock}
if 'type' not in dic:
return 'oops: ' +str(dic)
if dic['type'] not in funcs:
return ' '.join([dic['type'], 'is not in the api'])
check = security_check(dic)
if not check['bool']:
return check
try:
return funcs[dic['type']](check['newdic'], DB)
    except Exception:
        # swallow handler errors so one bad request cannot crash the peer loop
        pass
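# Illustrative request (not part of the module): a sketch of a payload this
# dispatcher accepts; custom.version comes from the project's custom module and
# DB is the state dict built by the caller.
#   dic = {'type': 'blockCount', 'version': custom.version}
#   main(dic, DB)  # -> {'length': ..., 'diffLength': ...}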
| [
"[email protected]"
] | |
7dfa2ddf781a50a0947412c4c7840743bc9f8eff | e3c6087ce7178c0608b1501e14c8f905fd14c3d2 | /src/app/push/user/user_manager.py | d23e38bdf3b2d4b20ea7c0717b71d2bff3d2b60d | [] | no_license | campanulamediuml/iot_server | 314fcef9b6bec2f1354390ecad2d0acb18dcf06e | 6657ad9895445fb668f027affc6346eeeda58e67 | refs/heads/master | 2022-12-14T02:53:05.007754 | 2020-02-01T08:12:02 | 2020-02-01T08:12:02 | 237,582,696 | 1 | 0 | null | 2022-12-08T07:03:48 | 2020-02-01T08:04:25 | Python | UTF-8 | Python | false | false | 2,820 | py | from app.push.user.user import User
from app.push.relay import Relay
from data.server import Data
import time
class UserManager(object):
def __init__(self):
self._user_dict = {}
self._user_handler_dict = {}
self._admin_dict = {}
def update(self):
# print('kill')
del_list = list()
for user_id, user in self._user_dict.items():
if user.has_heartbeat() is True:
continue
del_list.append(user_id)
for user_id in del_list:
Relay.user_exit(user_id)
        # update the user manager
exit_admin = []
        for admin_sid, last_connect_time in self._admin_dict.items():
            if int(time.time()) - last_connect_time > 10:
                exit_admin.append(admin_sid)
        for i in exit_admin:
            self._admin_dict.pop(i)
            print('admin', i, 'had no heartbeat for too long and was removed')
return
def update_heart_beat(self,sid):
if sid in self._admin_dict:
self._admin_dict[sid] = int(time.time())
return
user = self.get_user_by_sid(sid)
if user is None:
Relay.send_disconnect(sid)
else:
user.update_heart_beat()
return
# print(self._admin_dict)
def get_user(self,user_id):
if user_id in self._user_dict:
return self._user_dict[user_id]
return None
def add_admin(self,sid):
self._admin_dict[sid] = int(time.time())
        print(sid, 'connected to the push server!')
return
def is_admin(self,sid):
if sid in self._admin_dict:
return True
return False
    # admin backend connection
def kill_admin(self,sid):
if sid in self._admin_dict:
self._admin_dict.pop(sid)
return
    # remove the admin
def get_all_admin(self):
return self._admin_dict
    # ============= admin backend connections =============
    def login(self, user_id, sid):
        # sid parameter added here: create_user() below requires it, so the
        # original one-argument call looked like a latent bug
        user = self.create_user(user_id, sid)
        # register the user's info
        self._user_dict[user_id] = user
        self._user_handler_dict[user.get_sid()] = user
        return
def create_user(self,user_id,sid):
user = User(user_id)
user.init_from_data(sid)
return user
    # create a user
def get_user_by_sid(self,sid):
if sid in self._user_handler_dict:
return self._user_handler_dict[sid]
else:
return None
    # look up a user by sid
    def user_exit(self, user_id):
        # the user exits; check membership before looking the user up so a
        # double exit cannot raise a KeyError
        if user_id not in self._user_dict:
            return
        user = self._user_dict.pop(user_id)
        if user.get_sid() in self._user_handler_dict:
            self._user_handler_dict.pop(user.get_sid())
        return
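# Illustrative usage (not part of the original module); the sids here are
# placeholders for socket session ids supplied by the transport layer:
#   manager = UserManager()
#   manager.add_admin('admin-sid-1')          # an admin connects
#   manager.update_heart_beat('admin-sid-1')  # keep the session alive
#   manager.update()                          # periodic sweep of dead sessions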
| [
"[email protected]"
] | |
64114d8d6de771147d00d1ce7e7e5513c4b883ea | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_photostats.py | b655b753c702e900bf321ce1a12133ccbdddb3c3 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py |
# class header
class _PHOTOSTATS():
def __init__(self,):
self.name = "PHOTOSTATS"
        self.definitions = 'photostat'  # quoted: the bare name photostat was undefined and raised a NameError
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['photostat']
| [
"[email protected]"
] | |
0199c7123b0f48afe8371149ef949d0f1f1808d2 | f662aa3ce7896ca0283cae38df8ef824c1b80c9a | /library/tests/test_setup.py | 9579313b7b5b97b052f09122ff03f5decae81e69 | [
"MIT"
] | permissive | pimoroni/plasma | bd7ddebbc60ae7cc9c2561408b52fc46bf810672 | 7857c44255285aac061a9064dd033fd63bbbda29 | refs/heads/master | 2023-02-10T13:27:17.565867 | 2023-01-30T17:27:28 | 2023-01-30T17:27:28 | 155,544,928 | 12 | 9 | MIT | 2021-11-06T04:14:19 | 2018-10-31T11:17:40 | Python | UTF-8 | Python | false | false | 369 | py | """Test Plasma basic initialisation."""
import mock
def test_legacy_setup(GPIO):
"""Test init succeeds and GPIO pins are setup."""
from plasma import legacy as plasma
plasma.show()
GPIO.setmode.assert_called_once_with(GPIO.BCM)
GPIO.setup.assert_has_calls([
mock.call(plasma.DAT, GPIO.OUT),
mock.call(plasma.CLK, GPIO.OUT)
])
| [
"[email protected]"
] | |
ce67b4c7a1213011dedbd40bb03a735ddee6c245 | 93a7db386dfa0ac0dc369cc7f4b974224c801d8d | /deploy/ngram-all/scripts/ngram-30.py | 4575b6d9056180b9f17d03dcdbc9ea32e3fb42f8 | [] | no_license | lingxiao/good-great-combo | e051f20c89b7317a14ca5cee357bda7b095ce174 | 4d2691866bc21e2c542354ad3aae6f369eb86c87 | refs/heads/master | 2021-01-19T19:30:43.391759 | 2017-04-09T12:35:15 | 2017-04-09T12:35:15 | 83,699,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,420 | py | ############################################################
# Module : A series of measures on the graph for experiments
# Date : April 2nd, 2017
# Author : Xiao Ling
############################################################
import os
import re
import networkx as nx
from utils import *
from scripts import *
from app.config import PATH
############################################################
'''
paths
'''
_root = os.path.join(PATH['directories']['deploy'], 'ngram-all')
_word_dir = os.path.join(_root, 'words')
_word_pair_dir = os.path.join(_root, 'pairs')
_output_dir = os.path.join(_root, 'outputs')
_script_dir = os.path.join(_root ,'scripts')
'''
@Use: collect ngram counts
'''
batch = 30
word_path = os.path.join(_word_dir , 'batch-' + str(batch) + '.txt')
word_pair_path = os.path.join(_word_pair_dir , 'batch-' + str(batch) + '.txt')
pattern_path = PATH['assets']['patterns']
ngram_dir = PATH['ngrams']['full']
out_dir = _output_dir
log_dir = PATH['directories']['log']
# ngram_by_words( word_path
# , ngram_dir
# , os.path.join(out_dir,'batch-' + str(batch) + '.txt')
# , log_dir
# , debug = False)
collect_ngram_patterns( word_pair_path
, pattern_path
, ngram_dir
, out_dir
, log_dir
, debug = False)
| [
"[email protected]"
] | |
1b2a2f389775c77ff7576d08af07d659dab43a09 | 63350ed1a8d2de2f7d16687b20c93e76f5a9f5a6 | /fk_partner_banks/models/__init__.py | 6b6c75e8a174a9eeda2ca09225807df59090fedf | [] | no_license | rosalesdc/fukuoka_testing | bb91277f489c02d492bfa26ba6377007f9cf2701 | 6004845e9359ccf33fb6159b826120499bc8c104 | refs/heads/master | 2020-04-17T01:20:57.455110 | 2019-12-04T23:07:42 | 2019-12-04T23:07:42 | 166,088,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | # -*- coding: utf-8 -*-
from . import res_partner
from . import res_partner_bancos | [
"[email protected]"
] | |
7418fe42514906ca3b54fe3292c0e73322f20b47 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-cdn/huaweicloudsdkcdn/v1/model/referer_rsp.py | 5e464eeb471ed9c49f7aa9e96766d4e22f3f2f69 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,440 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RefererRsp:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'referer_type': 'int',
'referer_list': 'str',
'include_empty': 'bool'
}
attribute_map = {
'referer_type': 'referer_type',
'referer_list': 'referer_list',
'include_empty': 'include_empty'
}
def __init__(self, referer_type=None, referer_list=None, include_empty=None):
"""RefererRsp - a model defined in huaweicloud sdk"""
self._referer_type = None
self._referer_list = None
self._include_empty = None
self.discriminator = None
if referer_type is not None:
self.referer_type = referer_type
if referer_list is not None:
self.referer_list = referer_list
if include_empty is not None:
self.include_empty = include_empty
@property
def referer_type(self):
"""Gets the referer_type of this RefererRsp.
        Referer type. Values: 0 = no Referer filtering; 1 = blacklist; 2 = whitelist. Defaults to 0.
:return: The referer_type of this RefererRsp.
:rtype: int
"""
return self._referer_type
@referer_type.setter
def referer_type(self, referer_type):
"""Sets the referer_type of this RefererRsp.
        Referer type. Values: 0 = no Referer filtering; 1 = blacklist; 2 = whitelist. Defaults to 0.
:param referer_type: The referer_type of this RefererRsp.
:type: int
"""
self._referer_type = referer_type
@property
def referer_list(self):
"""Gets the referer_list of this RefererRsp.
        Domain names or IP addresses separated by ";". Domains and IPs may be mixed and wildcard domains are supported, with at most 100 entries in total. Required when hotlink protection (Referer filtering) is configured.
:return: The referer_list of this RefererRsp.
:rtype: str
"""
return self._referer_list
@referer_list.setter
def referer_list(self, referer_list):
"""Sets the referer_list of this RefererRsp.
        Domain names or IP addresses separated by ";". Domains and IPs may be mixed and wildcard domains are supported, with at most 100 entries in total. Required when hotlink protection (Referer filtering) is configured.
:param referer_list: The referer_list of this RefererRsp.
:type: str
"""
self._referer_list = referer_list
@property
def include_empty(self):
"""Gets the include_empty of this RefererRsp.
        Whether an empty Referer is matched. For a blacklist, enabling this blocks requests without a Referer; for a whitelist, enabling this allows requests without a Referer. Disabled by default.
:return: The include_empty of this RefererRsp.
:rtype: bool
"""
return self._include_empty
@include_empty.setter
def include_empty(self, include_empty):
"""Sets the include_empty of this RefererRsp.
        Whether an empty Referer is matched. For a blacklist, enabling this blocks requests without a Referer; for a whitelist, enabling this allows requests without a Referer. Disabled by default.
:param include_empty: The include_empty of this RefererRsp.
:type: bool
"""
self._include_empty = include_empty
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RefererRsp):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
53cfa943c71da187b76615b9e210acd591c177fb | e616ea35ead674ebb4e67cae54768aaaeb7d89c9 | /project/alma/groups/migrations/0001_initial.py | ea300a72ba26028fa8b8e7cb76ef846668025ebd | [] | no_license | VWApplications/VWAlmaAPI | 12bb1888533cf987739b0e069737afa6337141e1 | 3a8009b17518384c269dfee3c8fe44cbe2567cc0 | refs/heads/master | 2022-04-02T10:26:49.832202 | 2020-02-12T04:46:31 | 2020-02-12T04:46:31 | 161,098,215 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,648 | py | # Generated by Django 2.1.4 on 2019-10-20 03:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('disciplines', '0007_remove_discipline_was_group_provided'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, help_text='Data na qual o objeto foi criado.', verbose_name='Criado em')),
('updated_at', models.DateTimeField(auto_now=True, help_text='Data na qual o objeto foi atualizado.', verbose_name='Atualizado em')),
('title', models.CharField(help_text='Título do grupo', max_length=50, verbose_name='Título')),
('students_limit', models.PositiveIntegerField(default=0, help_text='Limite de estudantes do grupo', verbose_name='Limite de estudantes')),
('discipline', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='groups', to='disciplines.Discipline', verbose_name='Discipline')),
('students', models.ManyToManyField(blank=True, related_name='student_groups', to=settings.AUTH_USER_MODEL, verbose_name='Students')),
],
options={
'db_table': 'groups',
'ordering': ['title', 'created_at'],
},
),
]
| [
"[email protected]"
] | |
14aeae04e3c6f6a5a94f8f36cffad0f269855994 | 68f757e7be32235c73e316888ee65a41c48ecd4e | /백준_python/2000/2500_2599/2501.py | c49d55d4f683ab54758e774c9f7bb12d31669f5c | [] | no_license | leejongcheal/algorithm_python | b346fcdbe9b1fdee33f689477f983a63cf1557dc | f5d9bc468cab8de07b9853c97c3db983e6965d8f | refs/heads/master | 2022-03-05T20:16:21.437936 | 2022-03-03T01:28:36 | 2022-03-03T01:28:36 | 246,039,901 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | def f(N):
for i in range(1, N//2+1):
if N % i == 0:
yield i
yield N
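# Baekjoon 2501: print the K-th smallest divisor of N, or 0 if N has fewer than K divisors.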
N, K = map(int, input().split())
L = list(f(N))
try:
print(L[K - 1])
except IndexError:  # N has fewer than K divisors
    print(0)
| [
"[email protected]"
] | |
bb3964e9c470d43dd2e31774435b4096a7129340 | 37ba62db61fc4ec62634638763a984cbfbe40fe3 | /day09/02 作业讲解.py | 5caee326901bbae6f5a9b2598f304b118ebe05ef | [] | no_license | lt910702lt/python | ca2768aee91882c893a9bc6c1bdd1b455ebd511f | c6f13a1a9461b18df17205fccdc28f89854f316c | refs/heads/master | 2020-05-09T22:54:22.587206 | 2019-09-17T09:02:53 | 2019-09-17T09:02:53 | 181,485,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,316 | py | ###第一题: 有如下文件,里面的内容如下,分别完成以下功能:
'''
老男孩是一个培训机构
为学生服务,
为学生未来,
都是骗人的,哈哈
'''
## 1.1. Read the entire original file and print it
# f = open("oldboy", mode="r", encoding="utf-8")
# s = f.read()
# f.flush()
# f.close()
# print(s)
# 1.2. Append one line to the original file: 信不信由你,反正我信了 ("believe it or not, I believe it")
# f = open("oldboy", mode="a", encoding="utf-8")
# f.write("\n信不信由你.反正我信了")
# f.flush()
# f.close()
# 1.3. Read the entire original file, then append the same line at the end: 信不信由你,反正我信了
# f = open("oldboy", mode="r+", encoding="utf-8")
# f.read()
# f.write("\n信不信由你.反正我信了")
# f.flush()
# f.close()
# 1.4. Empty the source file completely and replace it with the content below
# f = open("oldboy", mode="w+", encoding="utf-8")
# f.write('''每天坚持一点,
# 每天努力一点,
# 每天多思考一点,
# 慢慢你会发现,
# 你的进步越来越大。
# ''')
# f.flush()
# f.close()
# 1.5. Read out all of the source file, insert "你们就信吧" ("go ahead and believe it") before the line "都是骗人的,哈哈", and write the changed content to a new file a1.txt
# import os
#
# with open("oldboy", mode="r", encoding="utf-8") as f1, open("oldboy_new", mode="w", encoding="utf-8") as f2:
# s = f1.read()
# ss = s.replace("都是骗人的,哈哈", "你们就信吧!\n都是骗人的,哈哈")
# f2.write(ss)
# os.remove("oldboy")
# os.rename("oldboy_new", "oldboy")
###Question 2: A file has the contents below; using code, build them into this data type
# 序号 部门 人数 平均年龄 备注   (columns: number, department, headcount, average age, remark)
# 1 Python 30 26 单身狗
# 2 Linux 26 30 没对象
# 3 运营部 20 24 女生多
# [{'序号': '1', '部门': 'Python', '人数': '30', "平均年龄": '26', '备注': '单身狗'}]
# ......
f = open("a6", mode="r", encoding='utf-8')
line = f.readline()
lst = line.split()  # split the first line; its fields become the dict keys
result = []  # define a result list
for lin in f:
    ll = lin.split()  # the data fields of this line
    dic = {}  # put each split line's result into its own dict
    for i in range(len(ll)):
        dic[lst[i]] = ll[i]
    result.append(dic)  # add the dict to the result list
f.close()
print(result)
"[email protected]"
] | |
c8b3a630c7f0945eab12bd6d869373ec854be477 | f1324b8a6da3a9ba8fc51c7dd268c2c788aaf796 | /Gardener/python/variables/qq2zzEWKcorrectionsWeight.py | d329f7fc9659ae57c87d3822c490f30e2530d773 | [] | no_license | scodella/LatinoAnalysis | 82ad4df70d971902254f73036ed0b07687411bdf | 5c1ba282f683d9d7a1f7dd3a5eec59cba619955d | refs/heads/master | 2023-09-05T04:08:21.554924 | 2022-08-04T10:36:36 | 2022-08-04T10:36:36 | 69,543,022 | 0 | 15 | null | 2023-07-18T07:51:14 | 2016-09-29T07:38:38 | Python | UTF-8 | Python | false | false | 10,954 | py | from LatinoAnalysis.Gardener.gardening import TreeCloner
import numpy
import ROOT
import sys
import optparse
import re
import warnings
import os.path
from array import array;
class qq2zzEWKcorrectionsWeightFiller(TreeCloner):
def __init__(self):
pass
def help(self):
return '''Add weight to cope with electroweak corrections'''
def addOptions(self,parser):
pass
def checkOptions(self,opts):
pass
def process(self,**kwargs):
tree = kwargs['tree']
input = kwargs['input']
output = kwargs['output']
self.connect(tree,input)
newbranches = ['ewkZZ', 'ewkZZuncertainty']
self.clone(output,newbranches)
ewkZZ = numpy.ones(1, dtype=numpy.float32)
ewkZZuncertainty = numpy.ones(1, dtype=numpy.float32)
self.otree.Branch('ewkZZ' , ewkZZ , 'ewkZZ/F')
self.otree.Branch('ewkZZuncertainty' , ewkZZuncertainty , 'ewkZZuncertainty/F')
nentries = self.itree.GetEntries()
print 'Total number of entries: ',nentries
itree = self.itree
otree = self.otree
cmssw_base = os.getenv('CMSSW_BASE')
try:
ROOT.gROOT.LoadMacro(cmssw_base+'/src/LatinoAnalysis/Gardener/python/variables/qq2wvEWKcorrectionsWeight.C+g')
except RuntimeError:
ROOT.gROOT.LoadMacro(cmssw_base+'/src/LatinoAnalysis/Gardener/python/variables/qq2wvEWKcorrectionsWeight.C++g')
#----------------------------------------------------------------------------------------------------
qq2wvEWKcorrections = ROOT.qq2wvEWKcorrections(cmssw_base+'/src/LatinoAnalysis/Gardener/python/data/ewk/ZZ_EwkCorrections.dat')
print '- Starting eventloop'
step = 5000
for i in xrange(nentries):
itree.GetEntry(i)
if i > 0 and i%step == 0.:
print i,'events processed.'
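            # read the four LHE-level leptons, neutrinos and partons for this event (pt, eta, phi, pdg id)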
temp_ptl1 = itree.std_vector_LHElepton_pt[0]
temp_etal1 = itree.std_vector_LHElepton_eta[0]
temp_phil1 = itree.std_vector_LHElepton_phi[0]
temp_idl1 = itree.std_vector_LHElepton_id[0]
temp_ptl2 = itree.std_vector_LHElepton_pt[1]
temp_etal2 = itree.std_vector_LHElepton_eta[1]
temp_phil2 = itree.std_vector_LHElepton_phi[1]
temp_idl2 = itree.std_vector_LHElepton_id[1]
temp_ptl3 = itree.std_vector_LHElepton_pt[2]
temp_etal3 = itree.std_vector_LHElepton_eta[2]
temp_phil3 = itree.std_vector_LHElepton_phi[2]
temp_idl3 = itree.std_vector_LHElepton_id[2]
temp_ptl4 = itree.std_vector_LHElepton_pt[3]
temp_etal4 = itree.std_vector_LHElepton_eta[3]
temp_phil4 = itree.std_vector_LHElepton_phi[3]
temp_idl4 = itree.std_vector_LHElepton_id[3]
temp_ptv1 = itree.std_vector_LHEneutrino_pt[0]
temp_etav1 = itree.std_vector_LHEneutrino_eta[0]
temp_phiv1 = itree.std_vector_LHEneutrino_phi[0]
temp_idv1 = itree.std_vector_LHEneutrino_id[0]
temp_ptv2 = itree.std_vector_LHEneutrino_pt[1]
temp_etav2 = itree.std_vector_LHEneutrino_eta[1]
temp_phiv2 = itree.std_vector_LHEneutrino_phi[1]
temp_idv2 = itree.std_vector_LHEneutrino_id[1]
temp_ptv3 = itree.std_vector_LHEneutrino_pt[2]
temp_etav3 = itree.std_vector_LHEneutrino_eta[2]
temp_phiv3 = itree.std_vector_LHEneutrino_phi[2]
temp_idv3 = itree.std_vector_LHEneutrino_id[2]
temp_ptv4 = itree.std_vector_LHEneutrino_pt[3]
temp_etav4 = itree.std_vector_LHEneutrino_eta[3]
temp_phiv4 = itree.std_vector_LHEneutrino_phi[3]
temp_idv4 = itree.std_vector_LHEneutrino_id[3]
temp_ptq1 = itree.std_vector_LHEparton_pt[0]
temp_etaq1 = itree.std_vector_LHEparton_eta[0]
temp_phiq1 = itree.std_vector_LHEparton_phi[0]
temp_idq1 = itree.std_vector_LHEparton_id[0]
temp_ptq2 = itree.std_vector_LHEparton_pt[1]
temp_etaq2 = itree.std_vector_LHEparton_eta[1]
temp_phiq2 = itree.std_vector_LHEparton_phi[1]
temp_idq2 = itree.std_vector_LHEparton_id[1]
temp_ptq3 = itree.std_vector_LHEparton_pt[2]
temp_etaq3 = itree.std_vector_LHEparton_eta[2]
temp_phiq3 = itree.std_vector_LHEparton_phi[2]
temp_idq3 = itree.std_vector_LHEparton_id[2]
temp_ptq4 = itree.std_vector_LHEparton_pt[3]
temp_etaq4 = itree.std_vector_LHEparton_eta[3]
temp_phiq4 = itree.std_vector_LHEparton_phi[3]
temp_idq4 = itree.std_vector_LHEparton_id[3]
x1 = itree.pdfx1
x2 = itree.pdfx2
id1 = itree.pdfid1
id2 = itree.pdfid2
ptl1 = 0.
etal1 = 0.
phil1 = 0.
idl1 = 0.
ptl2 = 0.
etal2 = 0.
phil2 = 0.
idl2 = 0.
ptv1 = 0
etav1 = 0
phiv1 = 0
idv1 = 0.
ptv2 = 0.
etav2 = 0.
phiv2 = 0.
idv2 = 0.
# 1 should be the first Z
# 2 should be the second Z
# assign the 4 leptons according to flavour and mass
if temp_ptl1 > 0 and temp_ptl3 > 0 :
# Z>ll and Z>ll
if (abs(temp_idl1) == abs(temp_idl2)) :
l1 = ROOT.TLorentzVector()
l1.SetPtEtaPhiM(temp_ptl1, temp_etal1, temp_phil1, 0) # fine approx massless leptons for check
l2 = ROOT.TLorentzVector()
l2.SetPtEtaPhiM(temp_ptl2, temp_etal2, temp_phil2, 0) # fine approx massless leptons for check
mass = (l1+l2).M()
if abs (mass - 91.1876) < 3 :
ptl1 = temp_ptl1
etal1 = temp_etal1
phil1 = temp_phil1
idl1 = temp_idl1
ptv1 = temp_ptl2
etav1 = temp_etal2
phiv1 = temp_phil2
idv1 = temp_idl2
ptl2 = temp_ptl3
etal2 = temp_etal3
phil2 = temp_phil3
idl2 = temp_idl3
ptv2 = temp_ptl4
etav2 = temp_etal4
phiv2 = temp_phil4
idv2 = temp_idl4
if (abs(temp_idl1) == abs(temp_idl3)) :
l1 = ROOT.TLorentzVector()
l1.SetPtEtaPhiM(temp_ptl1, temp_etal1, temp_phil1, 0) # fine approx massless leptons for check
l2 = ROOT.TLorentzVector()
l2.SetPtEtaPhiM(temp_ptl3, temp_etal3, temp_phil3, 0) # fine approx massless leptons for check
mass = (l1+l2).M()
if abs (mass - 91.1876) < 3 :
ptl1 = temp_ptl1
etal1 = temp_etal1
phil1 = temp_phil1
idl1 = temp_idl1
ptv1 = temp_ptl3
etav1 = temp_etal3
phiv1 = temp_phil3
idv1 = temp_idl3
ptl2 = temp_ptl2
etal2 = temp_etal2
phil2 = temp_phil2
idl2 = temp_idl2
ptv2 = temp_ptl4
etav2 = temp_etal4
phiv2 = temp_phil4
idv2 = temp_idl4
if (abs(temp_idl1) == abs(temp_idl4)) :
l1 = ROOT.TLorentzVector()
l1.SetPtEtaPhiM(temp_ptl1, temp_etal1, temp_phil1, 0) # fine approx massless leptons for check
l2 = ROOT.TLorentzVector()
l2.SetPtEtaPhiM(temp_ptl4, temp_etal4, temp_phil4, 0) # fine approx massless leptons for check
mass = (l1+l2).M()
if abs (mass - 91.1876) < 3 :
ptl1 = temp_ptl1
etal1 = temp_etal1
phil1 = temp_phil1
idl1 = temp_idl1
ptv1 = temp_ptl4
etav1 = temp_etal4
phiv1 = temp_phil4
idv1 = temp_idl4
ptl2 = temp_ptl2
etal2 = temp_etal2
phil2 = temp_phil2
idl2 = temp_idl2
ptv2 = temp_ptl3
etav2 = temp_etal3
phiv2 = temp_phil3
idv2 = temp_idl3
if temp_ptl1 > 0 and temp_ptv1 > 0 :
# Z>ll and Z>vv
ptl1 = temp_ptl1
etal1 = temp_etal1
phil1 = temp_phil1
idl1 = temp_idl1
ptv1 = temp_ptl2
etav1 = temp_etal2
phiv1 = temp_phil2
idv1 = temp_idl2
ptl2 = temp_ptv1
etal2 = temp_etav1
phil2 = temp_phiv1
idl2 = temp_idv1
ptv2 = temp_ptv2
etav2 = temp_etav2
phiv2 = temp_phiv2
idv2 = temp_idv2
if temp_ptl1 > 0 and temp_ptq1 > 0 :
# Z>ll and Z>qq
ptl1 = temp_ptl1
etal1 = temp_etal1
phil1 = temp_phil1
idl1 = temp_idl1
ptv1 = temp_ptl2
etav1 = temp_etal2
phiv1 = temp_phil2
idv1 = temp_idl2
ptl2 = temp_ptq1
etal2 = temp_etaq1
phil2 = temp_phiq1
idl2 = temp_idq1
ptv2 = temp_ptq2
etav2 = temp_etaq2
phiv2 = temp_phiq2
idv2 = temp_idq2
results_value_and_error = qq2wvEWKcorrections.getqq2WVEWKCorr(ptl1, etal1, phil1, idl1, ptl2, etal2, phil2, idl2, ptv1, etav1, phiv1, idv1, ptv2, etav2, phiv2, idv2, x1, x2, id1, id2, 0)
ewkZZ[0] = results_value_and_error[0]
ewkZZuncertainty[0] = results_value_and_error[1]
otree.Fill()
self.disconnect()
print '- Eventloop completed'
| [
"[email protected]"
] | |
8552b00e5269659b7760cfaf3cb055f996f35ec2 | ff853d7b3773db8de783fd26a76bd92742f85384 | /0x05-python-exceptions/4-list_division.py | ed4e25621fd48459d5909c213142dd2a0c614a32 | [] | no_license | stuartses/holbertonschool-higher_level_programming | 1b3315f624f9c2dc0c63ee3481021c5ed093a81d | 40497b632bf71c3b877cb61fce79b9d82b4519da | refs/heads/master | 2020-09-29T00:51:57.791491 | 2020-05-14T16:51:44 | 2020-05-14T16:51:44 | 226,905,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | #!/usr/bin/python3
def list_division(my_list_1, my_list_2, list_length):
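    """Divide my_list_1 by my_list_2 element by element over list_length items; errors yield 0."""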
div = 0
div_list = []
for i in range(list_length):
try:
div = my_list_1[i] / my_list_2[i]
except ZeroDivisionError:
print("division by 0")
div = 0
except TypeError:
print("wrong type")
div = 0
except IndexError:
print("out of range")
div = 0
finally:
div_list.append(div)
return div_list
| [
"[email protected]"
] | |
4f6bf8ff33c458c8a330b0f138aae35a6a15d02d | 6a7645b58b68c1f04a75055c0ec638086986d522 | /test/test_preprocessing.py | a2407a3d04fb9eba847ba3e25f37bcfc56ef2b85 | [
"MIT"
] | permissive | DamLabResources/crseek | 1832f0fa7111dd9ae491af947dc9052203a4f2da | 13d8870dc1d3bba6c58ef23772c1a2504e817198 | refs/heads/master | 2021-03-30T17:35:50.061326 | 2018-08-09T12:56:03 | 2018-08-09T12:56:03 | 88,295,772 | 8 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,270 | py | from itertools import product
from functools import partial
import numpy as np
import pandas as pd
import pytest
from Bio.Alphabet import generic_dna, generic_rna, IUPAC
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from crseek import exceptions
from crseek import preprocessing
from crseek import utils
from crseek.estimators import CFDEstimator
class TestBasicInputs(object):
def test_simple_inputs(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGG', alphabet=generic_dna)
inp = np.array([[spacer, target],
[spacer, target]])
assert preprocessing.check_proto_target_input(inp)
def test_str_inputs(self):
spacer = 'A' * 20
target = 'A' * 20 + 'AGG'
inp = np.array([[spacer, target],
[spacer, target]])
with pytest.raises(ValueError):
preprocessing.check_proto_target_input(inp)
def test_bad_alphabet_inputs(self):
spacer_d = Seq('A' * 20, alphabet=generic_dna)
spacer_r = Seq('A' * 20, alphabet=generic_rna)
target_d = Seq('A' * 20 + 'CGG', alphabet=generic_dna)
target_r = Seq('A' * 20 + 'CGG', alphabet=generic_rna)
checks = [preprocessing.check_proto_target_input,
preprocessing.MatchingTransformer().transform,
preprocessing.OneHotTransformer().transform
]
inp = np.array([[spacer_r, target_r],
[spacer_r, target_r]])
for check in checks:
with pytest.raises(exceptions.WrongAlphabetException):
check(inp)
inp = np.array([[spacer_d, target_d],
[spacer_d, target_d]])
for check in checks:
with pytest.raises(exceptions.WrongAlphabetException):
check(inp)
def test_missing_col(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
inp = np.array([[spacer],
[spacer]])
checks = [preprocessing.check_proto_target_input,
preprocessing.MatchingTransformer().transform,
preprocessing.OneHotTransformer().transform
]
for check in checks:
with pytest.raises(AssertionError):
check(inp)
def test_missing_PAM(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('A' * 20, alphabet=generic_dna)
inp = np.array([[spacer, target],
[spacer, target]])
checks = [preprocessing.check_proto_target_input,
preprocessing.MatchingTransformer().transform,
preprocessing.OneHotTransformer().transform
]
for check in checks:
with pytest.raises(AssertionError):
check(inp)
class TestOneHotEncodingUnAmbig(object):
spacer_alpha = IUPAC.unambiguous_rna
target_alpha = IUPAC.unambiguous_dna
processor = partial(preprocessing.one_hot_encode_row)
    cor_shape = 4 * 4 * 20 + 4 * 4
def check_shape(self, vals):
assert vals.shape[0] == self.cor_shape
def get_pam_pos(self, pam):
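        """Return the flat one-hot index that encodes the last two letters of the PAM."""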
sz = len(self.spacer_alpha.letters) * len(self.target_alpha.letters)
pam_order = list(product(sorted(self.target_alpha.letters), repeat=2))
pos = next(num for num, _pam in enumerate(pam_order) if (''.join(_pam)) == pam[-2:])
return 20*sz + pos
def test_encoding(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGG', alphabet=generic_dna)
cor = np.zeros(self.cor_shape)
sz = len(self.spacer_alpha.letters) * len(self.target_alpha.letters)
locs = np.arange(0, 20 * sz, sz)
cor[locs] = True
cor[self.get_pam_pos('AGG')] = True # GG
res = self.processor(spacer, target)
self.check_shape(res)
np.testing.assert_array_equal(cor.astype(bool), res)
def test_more_encoding(self):
spacer = Seq('U' + 'A' * 19, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGG', alphabet=generic_dna)
cor = np.zeros(self.cor_shape)
sz = len(self.spacer_alpha.letters) * len(self.target_alpha.letters)
locs = np.arange(0, 20 * sz, sz)
cor[locs] = True
cor[0] = False
ua_pos = 3*len(self.target_alpha.letters)
cor[ua_pos] = True
cor[self.get_pam_pos('AGG')] = True # GG
res = self.processor(spacer, target)
self.check_shape(res)
np.testing.assert_array_equal(cor.astype(bool), res)
def test_PAM_encoding(self):
spacer = Seq('U' + 'A' * 19, alphabet=generic_rna)
sz = len(self.spacer_alpha.letters) * len(self.target_alpha.letters)
locs = np.arange(0, 20 * sz, sz)
ua_pos = 3*len(self.target_alpha.letters)
pams = product(sorted(self.target_alpha.letters), repeat=2)
for pos, (p1, p2) in enumerate(pams):
cor = np.zeros(self.cor_shape)
cor[locs[1:]] = True
cor[ua_pos] = True
cor[20*sz + pos] = True
target = Seq('A' * 20 + 'A' + p1 + p2, alphabet=generic_dna)
res = self.processor(spacer, target)
self.check_shape(res)
np.testing.assert_array_equal(cor.astype(bool), res)
    def test_ambiguous_target(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('N' + 'A' * 19 + 'AGG', alphabet=generic_dna)
with pytest.raises(AssertionError):
preprocessing.one_hot_encode_row(spacer, target)
class TestOneHotEncodingAmbig(TestOneHotEncodingUnAmbig):
spacer_alpha = IUPAC.unambiguous_rna
target_alpha = IUPAC.ambiguous_dna
    cor_shape = 4 * 15 * 20 + 15 * 15
processor = partial(preprocessing.one_hot_encode_row,
spacer_alphabet=IUPAC.unambiguous_rna,
target_alphabet=IUPAC.ambiguous_dna)
    def test_ambiguous_target(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('N' + 'A' * 19 + 'AGG', alphabet=generic_dna)
res = self.processor(spacer, target)
self.check_shape(res)
cor = np.zeros(self.cor_shape)
sz = len(self.spacer_alpha.letters) * len(self.target_alpha.letters)
locs = np.arange(0, 20 * sz, sz)
cor[locs] = True
cor[0] = False
cor[8] = True #AN
cor[self.get_pam_pos('AGG')] = True # GG
np.testing.assert_array_equal(cor.astype(bool), res)
    def test_ambiguous_pam(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('N' + 'A' * 19 + 'ARG', alphabet=generic_dna)
res = self.processor(spacer, target)
self.check_shape(res)
cor = np.zeros(self.cor_shape)
sz = len(self.spacer_alpha.letters) * len(self.target_alpha.letters)
locs = np.arange(0, 20 * sz, sz)
cor[locs] = True
cor[0] = False
cor[8] = True #AN
cor[self.get_pam_pos('ARG')] = True # GG
np.testing.assert_array_equal(cor.astype(bool), res)
class TestOneHotTransformer(object):
def test_transforming(self):
spacer = Seq('U' + 'A' * 19, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGG', alphabet=generic_dna)
cor = np.zeros(21 * 16)
locs = np.arange(0, 20 * 16, 16)
cor[locs] = True
cor[0] = False
cor[12] = True
cor[-6] = True # GG
inp = np.array([[spacer, target],
[spacer, target],
[spacer, target]])
hot_encoder = preprocessing.OneHotTransformer()
res = hot_encoder.transform(inp)
assert res.shape == (3, 21 * 16)
for row in range(3):
np.testing.assert_array_equal(cor.astype(bool), res[row, :])
class TestMatchingEncoding(object):
def test_encoding(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGG', alphabet=generic_dna)
cor = np.array([True] * 21)
res = preprocessing.match_encode_row(spacer, target)
assert res.shape == (20 + 1,)
np.testing.assert_array_equal(cor.astype(bool), res)
def test_non_standard_pams(self):
spacer = Seq('A' * 20, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGGAAT', alphabet=generic_dna)
cor = np.array([True] * 21)
pam_pattern = preprocessing.make_pam_pattern('NNGRRT')
res = preprocessing.match_encode_row(spacer, target, pam_pattern = pam_pattern)
assert res.shape == (20 + 1,)
np.testing.assert_array_equal(cor.astype(bool), res)
def test_non_standard_pams_and_lengths(self):
spacer = Seq('A' * 25, alphabet=generic_rna)
target = Seq('A' * 25 + 'AGGAAT', alphabet=generic_dna)
cor = np.array([True] * 26)
pam_pattern = preprocessing.make_pam_pattern('NNGRRT')
res = preprocessing.match_encode_row(spacer, target, pam_pattern = pam_pattern)
assert res.shape == (26,)
np.testing.assert_array_equal(cor.astype(bool), res)
def test_more_encoding(self):
spacer = Seq('U' + 'A' * 19, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGG', alphabet=generic_dna)
cor = np.array([False] + [True] * 20)
res = preprocessing.match_encode_row(spacer, target)
assert res.shape == (20 + 1,)
np.testing.assert_array_equal(cor.astype(bool), res)
def test_U_encoding(self):
spacer = Seq('U' + 'A' * 19, alphabet=generic_rna)
target = Seq('T' + 'A' * 19 + 'AGG', alphabet=generic_dna)
cor = np.array([True] * 21)
res = preprocessing.match_encode_row(spacer, target)
assert res.shape == (20 + 1,)
np.testing.assert_array_equal(cor.astype(bool), res)
def test_transforming(self):
spacer = Seq('U' + 'A' * 19, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGG', alphabet=generic_dna)
cor = np.array([False] + [True] * 20)
inp = np.array([[spacer, target],
[spacer, target],
[spacer, target]])
hot_encoder = preprocessing.MatchingTransformer()
res = hot_encoder.transform(inp)
assert res.shape == (3, 20 + 1)
for row in range(3):
np.testing.assert_array_equal(cor.astype(bool), res[row, :])
def test_transforming_novel_pam(self):
spacer = Seq('U' + 'A' * 19, alphabet=generic_rna)
target = Seq('A' * 20 + 'AGGAAT', alphabet=generic_dna)
cor = np.array([False] + [True] * 20)
inp = np.array([[spacer, target],
[spacer, target],
[spacer, target]])
hot_encoder = preprocessing.MatchingTransformer(pam = 'NNGRRT')
res = hot_encoder.transform(inp)
assert res.shape == (3, 20 + 1)
for row in range(3):
np.testing.assert_array_equal(cor.astype(bool), res[row, :])
def make_random_seq(bp):
""" Utility function for making random sequence
Parameters
----------
bp : int
Length of sequence
Returns
-------
str
"""
return ''.join(np.random.choice(list('ACGT'), size=bp))
class TestLocate(object):
def make_basic(self):
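        """Build test loci with known protospacer hits plus the expected positions, strands and targets."""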
# prevent Heisenbugs
np.random.seed(0)
locus = [make_random_seq(50) + 'TTTT' + 'A' * 20 + 'CGG' + 'TTTT' + make_random_seq(50),
make_random_seq(12) + 'TTTT' + 'C' * 19 + 'T' + 'CGG' + 'TTTT' + make_random_seq(50),
make_random_seq(75),
make_random_seq(25) + 'TTTT' + 'T' + 'A' * 19 + 'TGG' + 'TTTT' + make_random_seq(50)]
locus = [SeqRecord(Seq(s, alphabet=generic_dna), id=str(n)) for n, s in enumerate(locus)]
spacers = [Seq('A' * 20, alphabet=generic_rna),
Seq('C' * 19 + 'U', alphabet=generic_rna),
Seq('C' * 19 + 'U', alphabet=generic_rna),
Seq('A' * 20, alphabet=generic_rna)]
cor_pos = [54, 16, np.nan, 29]
cor_strand = [1, 1, np.nan, 1]
cor_target = [Seq('A' * 20 + 'CGG', alphabet=generic_dna),
Seq('C' * 19 + 'T' + 'CGG', alphabet=generic_dna),
np.nan,
Seq('T' + 'A' * 19 + 'TGG', alphabet=generic_dna)]
cor_spacer = [Seq('A' * 20, alphabet=generic_rna),
Seq('C' * 19 + 'U', alphabet=generic_rna),
np.nan,
Seq('A' * 20, alphabet=generic_rna)]
return spacers, locus, cor_target, cor_spacer, cor_pos, cor_strand
@pytest.mark.skipif(utils._missing_casoffinder(), reason="Need CasOff installed")
def test_basic(self):
spacers, locus, cor_target, cor_spacer, cor_pos, cor_strand = self.make_basic()
X = np.array(list(zip(spacers, locus)))
estimator = CFDEstimator.build_pipeline()
nX, loc, _ = preprocessing.locate_hits_in_array(X, estimator, mismatches=6)
assert nX.shape == (4, 2)
assert loc.shape == (4, 2)
np.testing.assert_array_equal(cor_pos, loc[:, 0])
np.testing.assert_array_equal(cor_strand, loc[:, 1])
cX = pd.DataFrame(list(zip(cor_spacer, cor_target))).values
mask = np.array([True, True, False, True])
np.testing.assert_array_equal(cX[mask, :], nX[mask, :])
assert np.isnan(cX[2, 0])
assert np.isnan(cX[2, 1])
def test_exhaustive(self):
spacers, locus, cor_target, cor_spacer, cor_pos, cor_strand = self.make_basic()
X = np.array(list(zip(spacers, locus)))
estimator = CFDEstimator.build_pipeline()
nX, loc, _ = preprocessing.locate_hits_in_array(X, estimator, exhaustive=True)
assert nX.shape == (4, 2)
assert loc.shape == (4, 2)
mask = np.array([0, 1, 3])
np.testing.assert_array_equal(np.array(cor_pos)[mask], loc[mask, 0])
np.testing.assert_array_equal(np.array(cor_strand)[mask], loc[mask, 1])
cX = pd.DataFrame(list(zip(cor_spacer, cor_target))).values
np.testing.assert_array_equal(cX[mask, :], nX[mask, :])
| [
"[email protected]"
] | |
2dae655d7f22efba5c7f9cf43a1aba9ceec09a15 | 48eb84be45129c5904447e36a66170e739a8f8a0 | /Gconnect/housename/migrations/0005_auto_20180925_1333.py | adbd8659975c5a028fa555fbcdeceab9df929f0d | [] | no_license | anjuz1/project | efd806e8fff976168d70711eda36fcf42e3e9dbc | 295ed8efcfff622a64e9072bd2607fe8c147a957 | refs/heads/master | 2020-04-07T00:15:41.259953 | 2018-11-16T16:52:37 | 2018-11-16T16:52:37 | 157,896,677 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,283 | py | # Generated by Django 2.0.7 on 2018-09-25 08:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('housename', '0004_housename_house_mail'),
]
operations = [
migrations.AlterField(
model_name='housename',
name='house_adhar',
field=models.CharField(default='', max_length=38),
),
migrations.AlterField(
model_name='housename',
name='house_district',
field=models.CharField(default='Ernakulam', max_length=50),
),
migrations.AlterField(
model_name='housename',
name='house_mail',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='housename',
name='house_name',
field=models.CharField(default='', max_length=45),
),
migrations.AlterField(
model_name='housename',
name='house_po',
field=models.CharField(default='', max_length=50),
),
migrations.AlterField(
model_name='housename',
name='house_street',
field=models.CharField(default='', max_length=50),
),
]
| [
"[email protected]"
] | |
c7799f2cf6e1db8acdd8381d627ce3b1219455b2 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nnn798.py | 681e1df8097c5b9ea1017e8214cb670235faa582 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 316 | py | ii = [('CookGHP3.py', 1), ('AubePRP2.py', 1), ('UnitAI.py', 1), ('WilbRLW5.py', 1), ('ClarGE2.py', 1), ('WestJIT2.py', 1), ('DibdTRL2.py', 1), ('WadeJEB.py', 3), ('BackGNE.py', 2), ('MereHHB3.py', 6), ('MereHHB.py', 1), ('CoolWHM3.py', 1), ('FitzRNS.py', 1), ('StorJCC.py', 2), ('JacoWHI2.py', 1), ('SomeMMH.py', 1)] | [
"[email protected]"
] | |
3d9fda9f714066e6e0a6180fd2b6f1e16e7b078f | 377317f5aed0299d10dee4d8f1f2879b75a3d78e | /tf_agents/agents/data_converter.py | e359f8f8d084829f78d8662b8829c159bccaf1d1 | [
"Apache-2.0"
] | permissive | emma-resor/agents | 4e3fe14f91f287fe56f2d97fe4ce9e38904d4438 | 464a5118e44056db8981a33c9efe43014adabc69 | refs/heads/master | 2023-08-14T16:23:50.989936 | 2021-09-22T17:31:58 | 2021-09-22T17:31:58 | 409,240,525 | 0 | 0 | Apache-2.0 | 2021-09-22T17:31:59 | 2021-09-22T14:38:42 | null | UTF-8 | Python | false | false | 24,382 | py | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Agent Converter API and converters."""
from __future__ import absolute_import
from __future__ import division
# Using Type Annotations.
from __future__ import print_function
import typing
import tensorflow as tf
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import policy_step
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
from tf_agents.typing import types
from tf_agents.utils import composite
from tf_agents.utils import nest_utils
import typing_extensions as te
def _is_transition_like(value):
"""Helper to identify values that are transition like."""
if isinstance(value, trajectory.Transition):
return True
fields = getattr(value, '_fields', None)
if fields and trajectory.Transition._fields == fields:
return True
return False
def _is_trajectory_like(value):
"""Helper to identify values that are trajectory like."""
if isinstance(value, trajectory.Trajectory):
return True
fields = getattr(value, '_fields', None)
if fields and trajectory.Trajectory._fields == fields:
return True
return False
def _as_tfa_transition(value: typing.Tuple[typing.Any, typing.Any, typing.Any]):
"""Makes sure the transition and its values are TFA types."""
time_step, action_step, next_time_step = value
time_step = ts.TimeStep(*time_step)
action_step = policy_step.PolicyStep(*action_step)
next_time_step = ts.TimeStep(*next_time_step)
return trajectory.Transition(time_step, action_step, next_time_step)
class DataContext(tf.Module):
"""A class that stores useful data for performing data conversions."""
def __init__(
self,
time_step_spec: ts.TimeStep,
action_spec: types.NestedTensorSpec,
info_spec: types.NestedTensorSpec,
policy_state_spec: types.NestedTensorSpec = (),
use_half_transition: bool = False,
):
"""Creates a DataContext.
Note: The context does not store a state spec, or other information about
a Policy's internal state. Policy state is not typically stored in a
replay buffer or on disk, except when the policy explicitly chooses to
store it by adding the state as a field inside its `info` output. In
those cases, the internal policy state spec is represented as part of the
`info_spec`.
Args:
time_step_spec: A nest of `tf.TimeStep` representing the time_steps.
action_spec: A nest of `tf.TypeSpec` representing the actions.
info_spec: A nest of `tf.TypeSpec` representing the policy's info.
(Typically this is the info emitted by the collect policy).
policy_state_spec: A nest of `tf.TypeSpec` representing the policy's
state.
      use_half_transition: Whether to build the transition spec as a half
        transition whose `next_time_step` contains a dummy observation.
Raises:
TypeError: If any of the specs are not nests containing tf.TypeSpec
objects.
"""
def _each_isinstance(spec, spec_types):
"""Checks if each element of `spec` is instance of `spec_types`."""
return all([isinstance(s, spec_types) for s in tf.nest.flatten(spec)])
for (spec, label) in ((time_step_spec, 'time_step_spec'),
(action_spec, 'action_spec'),
(info_spec, 'info_spec')):
if not _each_isinstance(spec, tf.TypeSpec):
raise TypeError(
'{} has to contain TypeSpec (TensorSpec, '
'SparseTensorSpec, etc) objects, but received: {}'
.format(label, spec))
self._time_step_spec = time_step_spec
self._action_spec = action_spec
self._info_spec = info_spec
self._policy_state_spec = policy_state_spec
self._trajectory_spec = trajectory.Trajectory(
step_type=time_step_spec.step_type,
observation=time_step_spec.observation,
action=action_spec,
policy_info=info_spec,
next_step_type=time_step_spec.step_type,
reward=time_step_spec.reward,
discount=time_step_spec.discount)
if use_half_transition:
next_time_step_spec = time_step_spec._replace(
observation=tensor_spec.TensorSpec(
(), dtype=tf.float32, name='observation'))
else:
next_time_step_spec = time_step_spec
self._transition_spec = trajectory.Transition(
time_step=time_step_spec,
action_step=policy_step.PolicyStep(action=action_spec,
state=policy_state_spec,
info=info_spec),
next_time_step=next_time_step_spec)
@property
def time_step_spec(self) -> ts.TimeStep:
return self._time_step_spec
@property
def action_spec(self) -> types.NestedTensorSpec:
return self._action_spec
@property
def info_spec(self) -> types.NestedTensorSpec:
return self._info_spec
@property
def policy_state_spec(self) -> types.NestedTensorSpec:
return self._policy_state_spec
@property
def trajectory_spec(self) -> trajectory.Trajectory:
return self._trajectory_spec
@property
def transition_spec(self) -> trajectory.Transition:
return self._transition_spec
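# A minimal construction sketch (illustrative only; `my_agent` and its
# attributes are assumptions, not part of this module):
#   context = DataContext(
#       time_step_spec=my_agent.time_step_spec,
#       action_spec=my_agent.action_spec,
#       info_spec=my_agent.collect_policy.info_spec)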
def _validate_trajectory(
value: trajectory.Trajectory,
trajectory_spec: trajectory.Trajectory,
sequence_length: typing.Optional[int],
num_outer_dims: te.Literal[1, 2] = 2): # pylint: disable=bad-whitespace
"""Validate a Trajectory given its spec and a sequence length."""
if not nest_utils.is_batched_nested_tensors(
value, trajectory_spec, num_outer_dims=num_outer_dims,
allow_extra_fields=True):
debug_str_1 = tf.nest.map_structure(lambda tp: tp.shape, value)
debug_str_2 = tf.nest.map_structure(
lambda spec: spec.shape, trajectory_spec)
shape_str = (
'two outer dimensions' if num_outer_dims == 2
else 'one outer dimension')
shape_prefix_str = '[B, T]' if num_outer_dims == 2 else '[B]'
raise ValueError(
'All of the Tensors in `value` must have {shape_str}. Specifically, '
'tensors must have shape `{shape_prefix_str} + spec.shape`.\n'
'Full shapes of value tensors:\n {debug_str_1}.\n'
'Expected shapes (excluding the {shape_str}):\n {debug_str_2}.'
.format(
shape_str=shape_str,
debug_str_1=debug_str_1,
debug_str_2=debug_str_2,
shape_prefix_str=shape_prefix_str))
# If we have a time dimension and a train_sequence_length, make sure they
# match.
if sequence_length is not None:
def check_shape(path, t): # pylint: disable=invalid-name
if t.shape[1] != sequence_length:
debug_str = tf.nest.map_structure(lambda tp: tp.shape, value)
raise ValueError(
'The agent was configured to expect a `sequence_length` '
'of \'{seq_len}\'. Value is expected to be shaped `[B, T] + '
'spec.shape` but at least one of the Tensors in `value` has a '
'time axis dim value \'{t_dim}\' vs '
'the expected \'{seq_len}\'.\nFirst such tensor is:\n\t'
'value.{path}. \nFull shape structure of '
'value:\n\t{debug_str}'.format(
seq_len=sequence_length,
t_dim=t.shape[1],
path=path,
debug_str=debug_str))
nest_utils.map_structure_with_paths(check_shape, value)
def _validate_transition(value: trajectory.Transition,
transition_spec: trajectory.Transition,
num_outer_dims: int):
"""Checks the given Transition for batch and time outer dimensions."""
if value.action_step.state:
# When state is not (), it does not have time dimension, therefore it needs
# to be validated separately.
_validate_state(value.action_step.state,
transition_spec.action_step.state)
action_step_without_state = value.action_step._replace(state=())
value_to_validate = value._replace(action_step=action_step_without_state)
action_spec_without_state = (
transition_spec.action_step._replace(state=()))
spec_to_validate = transition_spec._replace(
action_step=action_spec_without_state)
else:
value_to_validate = value
spec_to_validate = transition_spec
if not nest_utils.is_batched_nested_tensors(
value_to_validate,
spec_to_validate,
num_outer_dims=num_outer_dims,
allow_extra_fields=True):
debug_str_1 = tf.nest.map_structure(
lambda tp: tp.shape, value_to_validate)
debug_str_2 = tf.nest.map_structure(
lambda spec: spec.shape, spec_to_validate)
raise ValueError(
'All of the Tensors in `value` must have a single outer (batch size) '
'dimension. Specifically, tensors must have {} outer dimensions.'
'\nFull shapes of value tensors:\n {}.\n'
'Expected shapes (excluding the outer dimensions):\n {}.'
.format(num_outer_dims, debug_str_1, debug_str_2))
def _validate_state(state: types.NestedTensor, spec: types.NestedTensorSpec):
if not nest_utils.is_batched_nested_tensors(
state,
spec,
num_outer_dims=1,
allow_extra_fields=False):
raise ValueError('action_step.state does not match spec. '
'action_step.state.shape: {state_shape}, '
                     'spec.shape: {spec_shape}. '
'action_step.state: {state_value}, spec: '
'{spec_value}'.format(
state_shape=state.shape,
spec_shape=spec.shape,
state_value=state,
spec_value=spec))
class AsTrajectory(tf.Module):
"""Class that validates and converts other data types to Trajectory.
  Note that validation and conversion allow values to contain dictionaries
  with extra keys as compared to the specs in the data context. These
additional entries / observations are ignored and dropped during conversion.
This non-strict checking allows users to provide additional info and
observation keys at input without having to manually prune them before
converting.
"""
def __init__(self,
data_context: DataContext,
sequence_length: typing.Optional[int] = None,
num_outer_dims: te.Literal[1, 2] = 2): # pylint: disable=bad-whitespace
"""Create the AsTrajectory converter.
Args:
data_context: An instance of `DataContext`, typically accessed from the
`TFAgent.data_context` property.
sequence_length: The required time dimension value (if any), typically
determined by the subclass of `TFAgent`.
num_outer_dims: Expected number of outer dimensions. Either 1 or 2.
If `1`, call expects an outer batch dimension. If `2`, then call
expects the two outer dimensions `[batch, time]`.
"""
self._data_context = data_context
self._sequence_length = sequence_length
self._num_outer_dims = num_outer_dims
def __call__(self, value: typing.Any) -> trajectory.Trajectory:
"""Convers `value` to a Trajectory. Performs data validation and pruning.
- If `value` is already a `Trajectory`, only validation is performed.
- If `value` is a `Transition` with tensors containing two (`[B, T]`)
outer dims, then it is simply repackaged to a `Trajectory` and then
validated.
- If `value` is a `Transition` with tensors containing one (`[B]`) outer
dim, a `ValueError` is raised.
Args:
value: A `Trajectory` or `Transition` object to convert.
Returns:
A validated and pruned `Trajectory`.
Raises:
TypeError: If `value` is not one of `Trajectory` or `Transition`.
ValueError: If `value` has structure that doesn't match the converter's
spec.
TypeError: If `value` has a structure that doesn't match the converter's
spec.
ValueError: If `value` is a `Transition` without a time dimension, as
training Trajectories typically have batch and time dimensions.
"""
if isinstance(value, trajectory.Trajectory):
pass
elif isinstance(value, trajectory.Transition):
value = trajectory.Trajectory(
step_type=value.time_step.step_type,
observation=value.time_step.observation,
action=value.action_step.action,
policy_info=value.action_step.info,
next_step_type=value.next_time_step.step_type,
reward=value.next_time_step.reward,
discount=value.next_time_step.discount)
else:
raise TypeError('Input type not supported: {}'.format(value))
_validate_trajectory(
value, self._data_context.trajectory_spec,
sequence_length=self._sequence_length,
num_outer_dims=self._num_outer_dims)
value = nest_utils.prune_extra_keys(
self._data_context.trajectory_spec, value)
return value
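# Usage sketch (illustrative only; `context` and the [B, T]-shaped
# `batched_traj` are assumed to exist):
#   to_traj = AsTrajectory(context, sequence_length=2)
#   clean_traj = to_traj(batched_traj)  # validated, extra dict keys pruned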
class AsTransition(tf.Module):
"""Class that validates and converts other data types to Transition.
  Note that validation and conversion allow values to contain dictionaries
with extra keys as compared to the specs in the data context. These
additional entries / observations are ignored and dropped during conversion.
This non-strict checking allows users to provide additional info and
observation keys at input without having to manually prune them before
converting.
"""
def __init__(self, data_context: DataContext, squeeze_time_dim=False,
prepend_t0_to_next_time_step=False):
"""Creates the AsTransition converter.
Args:
data_context: An instance of `DataContext`, typically accessed from the
`TFAgent.data_context` property.
squeeze_time_dim: Whether to emit a transition without time
dimensions. If `True`, incoming trajectories are expected
to have a time dimension of exactly `2`, and emitted Transitions
will have no time dimensions.
prepend_t0_to_next_time_step: Whether to add t0 to next_time_step. This
option is useful when using sequential model and can allow target
network be able to take more information. Resulting shape of
time_step.observation is `[B, T, ...]` and resulting shape of
next_time_step.observation is `[B, T+1, ...]`.
"""
self._data_context = data_context
self._squeeze_time_dim = squeeze_time_dim
self._prepend_t0_to_next_time_step = prepend_t0_to_next_time_step
def __call__(self, value: typing.Any) -> trajectory.Transition:
"""Converts `value` to a Transition. Performs data validation and pruning.
- If `value` is already a `Transition`, only validation is performed.
- If `value` is a `Trajectory` and `squeeze_time_dim = True` then
`value` it must have tensors with shape `[B, T=2]` outer dims.
This is converted to a `Transition` object without a time
dimension.
- If `value` is a `Trajectory` with tensors containing a time dimension
having `T != 2`, a `ValueError` is raised.
Args:
value: A `Trajectory` or `Transition` object to convert.
Returns:
A validated and pruned `Transition`. If `squeeze_time_dim = True`,
the resulting `Transition` has tensors with shape `[B, ...]`. Otherwise,
the tensors will have shape `[B, T - 1, ...]`.
Raises:
TypeError: If `value` is not one of `Trajectory` or `Transition`.
ValueError: If `value` has structure that doesn't match the converter's
spec.
TypeError: If `value` has a structure that doesn't match the converter's
spec.
ValueError: If `squeeze_time_dim=True` and `value` is a `Trajectory`
with a time dimension having value other than `T=2`.
"""
if _is_transition_like(value):
value = _as_tfa_transition(value)
elif _is_trajectory_like(value):
required_sequence_length = 2 if self._squeeze_time_dim else None
_validate_trajectory(
value,
self._data_context.trajectory_spec,
sequence_length=required_sequence_length)
value = trajectory.to_transition(value)
# Remove the now-singleton time dim.
if self._squeeze_time_dim:
value = tf.nest.map_structure(
lambda x: composite.squeeze(x, axis=1), value)
else:
raise TypeError('Input type not supported: {}'.format(value))
num_outer_dims = 1 if self._squeeze_time_dim else 2
_validate_transition(
value, self._data_context.transition_spec, num_outer_dims)
value = nest_utils.prune_extra_keys(
self._data_context.transition_spec, value)
if self._prepend_t0_to_next_time_step:
      # This is useful when using a sequential model: it lets the target_q
      # network see the full observation sequence.
next_time_step_with_t0 = value.next_time_step._replace(
observation=tf.nest.map_structure(
lambda x, y: tf.concat([x[:, :1, ...], y], axis=1),
value.time_step.observation, value.next_time_step.observation))
value = value._replace(next_time_step=next_time_step_with_t0)
return value
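# Usage sketch (illustrative only; assumes `context` and a [B, T=2] trajectory):
#   to_transition = AsTransition(context, squeeze_time_dim=True)
#   transition = to_transition(traj)  # Transition with [B]-shaped tensors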
class AsHalfTransition(tf.Module):
"""Class that validates and converts other data types to HalfTransition.
HalfTransition is a special Transition whose next_time_step contains a dummy
observation.
  Note that validation and conversion allow values to contain dictionaries
with extra keys as compared to the specs in the data context. These
additional entries / observations are ignored and dropped during conversion.
This non-strict checking allows users to provide additional info and
observation keys at input without having to manually prune them before
converting.
"""
def __init__(self, data_context: DataContext, squeeze_time_dim=False):
"""Create the AsTransition converter.
Args:
data_context: An instance of `DataContext`, typically accessed from the
`TFAgent.data_context` property.
squeeze_time_dim: Whether to emit a transition without time
dimensions. If `True`, incoming trajectories are expected
to have a time dimension of exactly `2`, and emitted Transitions
will have no time dimensions.
"""
self._data_context = data_context
self._squeeze_time_dim = squeeze_time_dim
def __call__(self, value: typing.Any) -> trajectory.Transition:
"""Convert `value` to an N-step Transition; validate data & prune.
- If `value` is already a `Transition`, only validation is performed.
- If `value` is a `Trajectory` with tensors containing a time dimension
      having `T != 1` (with `squeeze_time_dim=True`), a `ValueError` is raised.
Args:
value: A `Trajectory` or `Transition` object to convert.
Returns:
A validated and pruned `Transition`. If `squeeze_time_dim = True`,
the resulting `Transition` has tensors with shape `[B, ...]`. Otherwise,
      the tensors will have shape `[B, T, ...]`.
Raises:
TypeError: If `value` is not one of `Trajectory` or `Transition`.
ValueError: If `value` has structure that doesn't match the converter's
spec.
TypeError: If `value` has a structure that doesn't match the converter's
spec.
      ValueError: If `squeeze_time_dim=True` and `value` is a `Trajectory`
        with a time dimension having value other than `T=1`.
"""
if _is_transition_like(value):
value = _as_tfa_transition(value)
elif _is_trajectory_like(value):
required_sequence_length = 1 if self._squeeze_time_dim else None
_validate_trajectory(
value,
self._data_context.trajectory_spec,
sequence_length=required_sequence_length)
if self._squeeze_time_dim:
value = tf.nest.map_structure(lambda e: tf.squeeze(e, axis=1), value)
policy_steps = policy_step.PolicyStep(
action=value.action, state=(), info=value.policy_info)
# TODO(b/130244652): Consider replacing 0 rewards & discounts with ().
time_steps = ts.TimeStep(
value.step_type,
reward=tf.nest.map_structure(tf.zeros_like, value.reward), # unknown
discount=tf.zeros_like(value.discount), # unknown
observation=value.observation)
next_time_steps = ts.TimeStep(
step_type=value.next_step_type,
reward=value.reward,
discount=value.discount,
observation=tf.zeros_like(value.discount))
value = trajectory.Transition(time_steps, policy_steps, next_time_steps)
else:
raise TypeError('Input type not supported: {}'.format(value))
num_outer_dims = 1 if self._squeeze_time_dim else 2
_validate_transition(
value, self._data_context.transition_spec, num_outer_dims)
value = nest_utils.prune_extra_keys(
self._data_context.transition_spec, value)
return value
class AsNStepTransition(tf.Module):
"""Class that validates and converts other data types to N-step Transition.
  Note that validation and conversion allow values to contain dictionaries
  with extra keys as compared to the specs in the data context. These
additional entries / observations are ignored and dropped during conversion.
This non-strict checking allows users to provide additional info and
observation keys at input without having to manually prune them before
converting.
"""
def __init__(self,
data_context: DataContext,
gamma: types.Float,
n: typing.Optional[int] = None):
"""Create the AsNStepTransition converter.
For more details on how `Trajectory` objects are converted to N-step
`Transition` objects, see
`tf_agents.trajectories.trajectory.to_n_step_transition`.
Args:
data_context: An instance of `DataContext`, typically accessed from the
`TFAgent.data_context` property.
gamma: A floating point scalar; the discount factor.
n: (Optional.) The expected number of frames given a `Trajectory` input.
Given a `Trajectory` with tensors shaped `[B, T, ...]`, we ensure that
`T = n + 1`. Only used for validation.
"""
self._data_context = data_context
self._gamma = gamma
self._n = n
def __call__(self, value: typing.Any) -> trajectory.Transition:
"""Convert `value` to an N-step Transition; validate data & prune.
- If `value` is already a `Transition`, only validation is performed.
- If `value` is a `Trajectory` with tensors containing a time dimension
having `T != n + 1`, a `ValueError` is raised.
Args:
value: A `Trajectory` or `Transition` object to convert.
Returns:
      A validated and pruned N-step `Transition` whose tensors have shape
      `[B, ...]`; the `n + 1` input frames are collapsed into a single
      transition by `trajectory.to_n_step_transition`.
Raises:
TypeError: If `value` is not one of `Trajectory` or `Transition`.
ValueError: If `value` has structure that doesn't match the converter's
spec.
TypeError: If `value` has a structure that doesn't match the converter's
spec.
ValueError: If `n != None` and `value` is a `Trajectory`
with a time dimension having value other than `T=n + 1`.
"""
if _is_transition_like(value):
value = _as_tfa_transition(value)
elif _is_trajectory_like(value):
_validate_trajectory(
value,
self._data_context.trajectory_spec,
sequence_length=None if self._n is None else self._n + 1)
value = trajectory.to_n_step_transition(value, gamma=self._gamma)
else:
raise TypeError('Input type not supported: {}'.format(value))
_validate_transition(
value, self._data_context.transition_spec, num_outer_dims=1)
value = nest_utils.prune_extra_keys(
self._data_context.transition_spec, value)
return value
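# Usage sketch (illustrative only; assumes `context` and a [B, T=n+1]
# trajectory):
#   to_n_step = AsNStepTransition(context, gamma=0.99, n=2)
#   transition = to_n_step(traj)  # one N-step Transition, [B]-shaped tensors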
| [
"[email protected]"
] | |
39747d390ebf9258de2c348aaf0ca675559bdadf | ad553dd718a8df51dabc9ba636040da740db57cf | /.history/app_20181219015549.py | 513904db7cccff06529a8549bb896f3c180c0921 | [] | no_license | NergisAktug/E-Commerce-PythonWithFlask-Sqlite3 | 8e67f12c28b11a7a30d13788f8dc991f80ac7696 | 69ff4433aa7ae52ef854d5e25472dbd67fd59106 | refs/heads/main | 2023-01-01T14:03:40.897592 | 2020-10-19T20:36:19 | 2020-10-19T20:36:19 | 300,379,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,009 | py | import time
import os
from flask import Flask,flash, request, render_template
from flask import Flask, url_for, redirect, session
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.sql import table, column, select
import hashlib
class ConfigClass(object):
SECRET_KEY = 'This is an INSECURE secret!! DO NOT use this in production!!'
SQLALCHEMY_DATABASE_URI = 'sqlite:///eticaret.sqlite'
SQLALCHEMY_TRACK_MODIFICATIONS = False
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 465
MAIL_USE_SSL = True
MAIL_USE_TLS = False
MAIL_USERNAME = '[email protected]'
MAIL_PASSWORD = '05383896877'
MAIL_DEFAULT_SENDER = '"MyApp" <[email protected]>'
USER_ENABLE_EMAIL = True
USER_ENABLE_USERNAME = False
USER_EMAIL_SENDER_EMAIL = "[email protected]"
def create_app():
app = Flask(__name__)
app.config.from_object(__name__ + '.ConfigClass')
db = SQLAlchemy(app)
UPLOAD_FOLDER = os.path.basename('uploads')
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
class Kullanici(db.Model):
__tablename__ = 'Kullanici'
id = db.Column(db.Integer, primary_key=True)
email = db.Column(db.String(80), unique=True)
sifre = db.Column(db.String(80))
rolId = db.Column(db.Integer, db.ForeignKey('rol.rolId', ondelete='CASCADE'))
def __init__(self, email, sifre,rolId):
self.email = email
self.sifre = sifre
self.rolId =rolId
class Roller(db.Model):
__tablename__ = 'rol'
rolId = db.Column(db.Integer, primary_key=True)
rolisim = db.Column(db.String(80))
class urunler(db.Model):
__tablename__ = 'urunler'
urun_id = db.Column(db.Integer, primary_key=True)
urunismi = db.Column(db.String(80))
urunresmi = db.Column(db.String(256))
urunstok=db.Column(db.Integer)
popular = db.Column(db.Text)
urunFiyati = db.Column(db.Integer)
markaId = db.Column(db.Integer,db.ForeignKey('markalar.markaId', ondelete='CASCADE'))
def __init__(self, urunismi, urunresmi, urunFiyati,urunstok,markaId):
self.urunismi =urunismi
self.urunresmi = urunresmi
self.urunFiyati = urunFiyati
self.markaId=markaId
self.urunstok=urunstok
class markalar(db.Model):
__tablename__ = 'markalar'
markaId = db.Column(db.Integer, primary_key=True)
markaadi = db.Column(db.String(80))
marka_modeli = db.Column(db.String(80))
def __init__(self, markaadi, marka_modeli):
self.markaadi = markaadi
self.marka_modeli = marka_modeli
class musteri(db.Model):
__tablename__ = 'musteri'
musteriId = db.Column(db.Integer, primary_key=True)
musteriadi = db.Column(db.String(80))
musterisoyadi = db.Column(db.String(80))
mail = db.Column(db.String(80), unique=True)
telefon = db.Column(db.Integer)
sifre = db.Column(db.String(80))
il = db.Column(db.String(80))
ilce = db.Column(db.String(80))
kullaniciId = db.Column(db.Integer(), db.ForeignKey('Kullanici.id', ondelete='CASCADE'))
def __init__(self, musteriadi, musterisoyadi, mail, telefon, sifre, il, ilce, kullaniciId):
self.musteriadi = musteriadi
self.musterisoyadi = musterisoyadi
self.mail = mail
self.telefon = telefon
self.sifre = sifre
self.il = il
self.ilce = ilce
self.kullaniciId = kullaniciId
class siparis(db.Model):
__tablename__ = 'siparis'
siparisId = db.Column(db.Integer, primary_key=True)
musteriId = db.Column(db.Integer(), db.ForeignKey('musteri.musteriId', ondelete='CASCADE'))
urunId = db.Column(db.Integer(), db.ForeignKey('urunler.urun_id', ondelete='CASCADE'))
kactanesatinalınmıs = db.Column(db.Text, nullable=False)
siparisTarihi = db.Column(db.Text,nullable=False)
def __init__(self, musteriId, urunId, kactanesatinalınmıs, siparisTarihi):
self.musteriId = musteriId
self.urunId = urunId
self.kactanesatinalınmıs =kactanesatinalınmıs
self.siparisTarihi = siparisTarihi
db.create_all()
AlısverisCantasi=[]
@app.route('/')
def anasayfa():
marka=markalar.query.all()
Urun=urunler.query.all()
tumVeri=urunler.query.all()
return render_template('index.html',tumVeri=tumVeri,Urun=Urun,marka=marka)
@app.route('/yukle', methods=['POST'])
def yukle():
file= request.files['image']
f = os.path.join(app.config['UPLOAD_FOLDER'], file.filename)
file.save(f)
return redirect(url_for('anasayfa'))
@app.route('/kayit', methods=['GET', 'POST'])
def kayit():
if request.method == 'POST':
mail = request.form['email']
parola = request.form['sifre']
sifrelenmis = hashlib.sha256(parola.encode("utf8")).hexdigest()
yeniKullanici = Kullanici(email=mail, sifre=sifrelenmis ,rolId=0)
db.session.add(yeniKullanici)
db.session.commit()
if yeniKullanici is not None:
mesaj = "Kayıt Başarıyla Sağlanmıştır."
return render_template("index.html", mesaj=mesaj)
else:
return render_template('kayit.html')
@app.route('/uye', methods=['GET', 'POST'])
def uye():
return redirect(url_for('giris'))
@app.route('/giris', methods=['GET', 'POST'])
def giris():
if request.method == 'GET':
return render_template('uyeGiris.html')
else:
email = request.form['email']
sifre = request.form['sifre']
sifrelenmis = hashlib.sha256(sifre.encode("utf8")).hexdigest()
data = Kullanici.query.filter_by(email=email, sifre=sifrelenmis ).first()
if data is not None:
                if Kullanici.query.filter_by(email=email, sifre=sifrelenmis, rolId=1).first():  # admin lookup must also use the hashed password
session['admin_giris'] = True
return render_template('admin.html',rolId = 1, gir = session['admin_giris'])
else:
session['uye_giris'] = True
session['sepett'] = AlısverisCantasi
session['id'] =data.id
session['name']=data.email
return redirect(url_for('anasayfa',rolId = 0, gir = session['uye_giris']))
else:
return render_template('uyeGiris.html')
@app.route('/cikis')
def cikis():
session['admin_giris'] = False
session['uye_giris'] = False
session.pop('sepett', None)
return redirect(url_for("anasayfa"))
@app.route('/urunEkle')
def urunGoster():
tumVeri=urunler.query.all()
return render_template("urunEkle.html",tumVeri=tumVeri)
@app.route('/urunEklemeYap',methods=['POST'])
def urunEklemeYap():
urunismi=request.form['urunismi']
urunResmi=request.form['urunresmi']
urunFiyati=request.form['fiyati']
urunStok=request.form['urunstok']
markaId=request.form['markaId']
yeniUrun=urunler(urunismi=urunismi,urunresmi=urunResmi,urunFiyati=urunFiyati,urunstok=urunStok,markaId=markaId)
db.session.add(yeniUrun)
db.session.commit()
return redirect(url_for("urunGoster"))
@app.route("/sil/<string:id>")
def sil(id):
urun=urunler.query.filter_by(urun_id=id).first()
db.session.delete(urun)
db.session.commit()
return redirect(url_for('urunGoster'))
@app.route('/guncelle/<string:id>',methods=['POST','GET'])
def guncelle(id):
try:
urunismi = request.form.get("urunİsmi")
urunresmi = request.form.get("urunresmi")
urunFiyati = request.form.get("urunFiyati")
urunStok=request.form.get("urunstok")
markaId = request.form.get("markaId")
urun = urunler.query.filter_by(urun_id=id).first()
urun.urunismi = urunismi
urun.urunresmi=urunresmi
urun.urunFiyati=urunFiyati
urun.urunstok=urunStok
urun.markaId=markaId
db.session.commit()
except Exception as e:
print("güncelleme yapılamadı")
print(e)
return redirect(url_for('urunGoster'))
@app.route('/sepett')
def sepett():
if 'uye_giris' in session:
if (session['uye_giris']==True):
al=session['sepett']
return render_template('sepet.html',sepet=al)
else:
return redirect(url_for('anasayfa'))
else:
session['uye_giris']=False
return redirect(url_for('anasayfa'))
@app.route('/sepet/<string:id>',methods=['POST','GET'])
def sepet(id):
if 'uye_giris' in session:
if (session['uye_giris']==True):
urun = urunler.query.filter_by(urun_id=id).first()
marka = markalar.query.filter_by(markaId=id).first()
durum = False
AlısverisCantasi=session['sepett']
for herbirsatir in AlısverisCantasi:
if (herbirsatir['id']==str(id)):
durum=True
if AlısverisCantasi==[]:
adet=1
toplam=adet*urun.urunFiyati
sepeturun={
'id':urun.urun_id,
                        'urunresmi':urun.urunresmi,
'isim':urun.urunismi,
'urunFiyati':urun.urunFiyati,
'urunmodeli':marka.marka_modeli,
'adet': adet,
'toplam': toplam
}
AlısverisCantasi.append(sepeturun)
session['sepett']=AlısverisCantasi
elif durum==True:
bossepet=[]
for satir in AlısverisCantasi:
if str(satir['id'])==str(id):
adet=int(satir['adet'])
adet+=1
fiyat=int(satir['urunFiyati'])
toplam=adet*fiyat
satir['adet'] = str(adet)
satir['toplam'] = str(toplam)
bossepet.append(satir)
else:
bossepet.append(satir)
else:
adet=1
toplam=adet*urun.urunFiyati
sepeturun={
'id':urun.urun_id,
'isim':urun.urunismi,
'urunFiyati':urun.urunFiyati,
'urunmodeli':marka.marka_modeli,
'adet': adet,
'toplam': toplam
}
AlısverisCantasi.append(sepeturun)
session['sepett']=AlısverisCantasi
return redirect(url_for('sepett'))
else:
return redirect(url_for('giris'))
else:
session['uye_giris']=False
return redirect(url_for('giris'))
@app.route('/Markalar',methods=['POST','GET'])
def Markalar():
tumMarka=markalar.query.all()
return render_template("marka.html",tumMarka=tumMarka)
@app.route('/markaEklemeYap',methods=['POST'])
def markaEklemeYap():
ad=request.form['markaadi']
model=request.form['markamodeli']
yenimarka=markalar(markaadi=ad,marka_modeli=model)
db.session.add(yenimarka)
db.session.commit()
return redirect(url_for("Markalar"))
@app.route('/sepetguncelle/<string:urunid>',methods=['POST','GET'])
def sepetguncelle(urunid):
if 'uye_giris' in session:
if (session['uye_giris']==True):
if request.method=='GET':
return redirect(url_for('anasayfa'))
else:
                adet=int(request.form['adet'])  # the new quantity is read from the form
guncellsepet=[]
guncellsepet=session['sepett']
AlısverisCantasi.clear()
for degistir in guncellsepet:
if str(degistir['id'])==str(urunid):
fiyat=int(degistir['urunFiyati'])
toplam=(fiyat*adet)
degistir['adet']=str(adet)
degistir['toplam']=str(toplam)
AlısverisCantasi.append(degistir)
session['sepett']=AlısverisCantasi
return redirect(url_for('sepett'))
else:
return redirect(url_for('giris'))
else:
session['uye_giris']=False
return render_template('uyeGiris.html')
@app.route('/sepetisil/<string:urunid>',methods=['POST','GET'])
def sepetisil(urunid):
if 'uye_giris' in session:
if (session['uye_giris']==True):
silsepeti=[]
silsepeti=session['sepett']
AlısverisCantasi.clear()
for sil in silsepeti:
if str(sil['id'])!=str(urunid):
AlısverisCantasi.append(sil)
session['sepett']=AlısverisCantasi
return render_template('sepet.html',sepet=session['sepett'])
else:
return redirect(url_for('giris'))
else:
session['uye_giris']=False
return redirect(url_for('giris'))
@app.route('/tumsepetisil',methods=['POST','GET'])
def tumsepetisil():
if 'uye_giris' in session:
if (session['uye_giris']==True):
AlısverisCantasi.clear()
session['sepett']=AlısverisCantasi
return redirect(url_for('sepett'))
else:
return redirect(url_for('giris'))
else:
session['uye_giris']=False
return redirect(url_for('giris'))
@app.route('/satınAl')
def satınAl():
if 'uye_giris' in session:
if (session['uye_giris']==True):
satinalanid=session['id']
AlısverisCantasi=session['sepett']
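            # for each cart line: bump the product's popularity, decrement its stock, and record the order row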
for urun in AlısverisCantasi:
urunidd=int(urun['id'])
adet=urun['adet']
urunn=urunler.query.filter_by(urun_id=urunidd).first()
eski=int(urunn.popular)
urunn.popular=str(int(adet)+eski)
urunn.urunstok-=int(adet)
db.session.add(urunn)
db.session.commit()
tarih=str(time.strftime("%x")+"-"+time.strftime("%X"))
siparisgecmisi=siparis(musteriId=satinalanid,urunId=urunidd,kactanesatinalınmıs=adet,siparisTarihi=tarih)
db.session.add(siparisgecmisi)
db.session.commit()
AlısverisCantasi.clear()
session['sepett']=AlısverisCantasi
return redirect(url_for('sepett'))
else:
return redirect(url_for('giris'))
else:
session['uye_giris']=False
return redirect(url_for('giris'))
@app.route('/siparisgecmisi')
def siparisgecmisi():
if 'uye_giris' in session:
if (session['uye_giris']==True):
kullaniciId=session['id']
gecmissiparis=siparis.query.filter_by(musteriId=kullaniciId)
gecmissiparisleritut=[]
for s in gecmissiparis:
urunn=urunler.query.filter_by(urun_id=s.urunId).first()
model=markalar.query.filter_by(markaId=s.urunId).first()
benimsiparisgecmisim={
'urunisim':str(urunn.urunismi),
'urunresmi':str(urunn.urunresmi),
'urunadet':s.kactanesatinalınmıs,
'urunFiyati':str(urunn.urunFiyati),
'urunModeli':str(model.marka_modeli),
'satinalmatarihi':s.siparisTarihi
}
gecmissiparisleritut.append(benimsiparisgecmisim)
return render_template('siparisgecmisi.html',sepet=gecmissiparisleritut)
else:
return redirect(url_for('giris'))
else:
session['uye_giris']=False
return redirect(url_for('giris'))
return app
if __name__ == '__main__':
app=create_app()
app.run(host='127.0.0.1', port=5000, debug=True) | [
"[email protected]"
] | |
6f9382fca59ef7eb65bcb7683c81b7757f35b90d | f65163f0670b04ed3d68632b2d020186947cf2d7 | /bundology/urls.py | 69855c319f387c9c03135ffd03a1dd629ac94df2 | [
"MIT"
] | permissive | ImmaculateObsession/bundology | 92fb23527b642c70393e710ed042c3e661d014b7 | cbf3859fc51f4fcf7f0da608af37aeb55671aed4 | refs/heads/master | 2021-01-01T06:50:43.114328 | 2013-09-29T18:43:32 | 2013-09-29T18:43:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 589 | py | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
from bundles.views import HomeView
urlpatterns = patterns('',
# Examples:
url(r'^$', HomeView.as_view(), name='home'),
# url(r'^bundology/', include('bundology.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
)
| [
"[email protected]"
] | |
2384c8e026b668140c8d5cad60d9811e0246ad3a | e6f144ff524a5e28bb9a3effe55f0e8b3d3f9d72 | /less14/chat_v2/chat/chatApp/validators.py | 321bd980636d9847dab114791bd09061c52d777d | [] | no_license | an4p/python_oop | 614382989914513b60b87d10e78c21a34debe162 | 5cff48072f30460df6e7300fa85038e18e986d4b | refs/heads/master | 2021-01-23T03:27:09.496370 | 2017-06-08T12:14:24 | 2017-06-08T12:14:24 | 86,077,237 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | from django.core.exceptions import ValidationError
import re
def not_null(data):
    if data is None:
raise ValidationError("This field cannot be null")
def email_validation(data):
    # basic shape check: local@domain.tld
    # (the original pattern r'\w{1,}' matched any single word character,
    # so it never actually rejected non-email logins)
    pattern = re.compile(r'^[^@\s]+@[^@\s]+\.[^@\s]+$')
    isemail = pattern.match(data)
    if isemail is None:
        raise ValidationError("Your login is not email")
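

if __name__ == "__main__":
    # Minimal sketch of how these validators behave outside a Django field.
    # This block is not part of the original file; it assumes ValidationError
    # itself needs no Django settings (true for plain string messages).
    try:
        email_validation("not an email")
    except ValidationError as exc:
        print("rejected:", exc.messages)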
| [
"[email protected]"
] | |
9acbf3882c7fd6e2cdd0af5022f6f471245c0b6e | c0032b63a0220bfd0fe8592b8f6d8382b808417c | /0x06-python-classes/3-square.py | e6a1ab3e3e526c71a4ae6b5ce0d8dd39fac0f71f | [] | no_license | Arkadington/alx-higher_level_programming | 2104d200aa3b8ff5026476d975fc7dfabe9db660 | 36d4aa2f25416b94cf2fca5598717bcec98d6211 | refs/heads/master | 2023-05-23T11:24:42.750209 | 2021-05-26T07:19:26 | 2021-05-26T07:19:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | #!/usr/bin/python3
"""defining a square based on 2-square.py"""
class Square:
"""defines a square by size"""
    def __init__(self, size=0):
        """Initialize size, validating it before assignment."""
        if not isinstance(size, int):
            raise TypeError('size must be an integer')
        if size < 0:
            raise ValueError('size must be >= 0')
        self.__size = size
def area(self):
"""returns the area of a square"""
area = self.__size * self.__size
return area
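

if __name__ == "__main__":
    # Quick self-check; the original Holberton exercise drives this class
    # from a separate main file, so this block is illustrative only.
    s = Square(4)
    print(s.area())  # 16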
| [
"[email protected]"
] | |
61c5a2c0cbafcf79a81a70720540685f9d224f30 | 82573b51d1188f653b673e1261e02fc9e4e12e66 | /etc/beam_search.py | 6feed3d5cdd861462535b44d844fc11b60a17694 | [] | no_license | JeeYz/git_from_the_hell | 94a55b1c922c993383927d5aaa7cad066d645ddb | 0826ab6b1fd760c145ac5dcab6a8aaa9e9af9e02 | refs/heads/master | 2022-09-25T02:17:55.636721 | 2022-09-17T18:03:26 | 2022-09-17T18:03:26 | 152,916,026 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,084 | py |
import random
import copy
input_data = list()
for i in range(10):
temp_val = random.random()
input_data.append(temp_val)
print(input_data)
beam_search_size = 3
def gen_beam_list(input_data):
arrange_list = copy.deepcopy(input_data)
global beam_search_size
idx_num = 0
flag_num = 0
    while True:
        target_idx = idx_num+1
        if arrange_list[idx_num] < arrange_list[target_idx]:
            # swap so larger values bubble toward the front (descending order)
            arrange_list[idx_num], arrange_list[target_idx] = \
                arrange_list[target_idx], arrange_list[idx_num]
            flag_num += 1
        idx_num += 1
        if idx_num == (len(arrange_list)-1):
            # end of a full pass: stop only if no swaps happened in this pass
            # (the original reset the flag every iteration, which could end
            # the sort before the list was fully ordered)
            if flag_num == 0:
                break
            idx_num = 0
            flag_num = 0
return arrange_list[:beam_search_size]
first_beam_list = gen_beam_list(input_data)
print(first_beam_list)
idx_list = list()
for one_val in first_beam_list:
temp_idx = input_data.index(one_val)
idx_list.append(temp_idx)
print(idx_list)
sequence_length = 5
result_list = list()
for i in range(beam_search_size):
temp_dict = dict()
temp_dict['index_list'] = list()
temp_dict['probability'] = 0.
temp_dict['index_list'].append(idx_list[i])
temp_dict['probability'] = input_data[idx_list[i]]
result_list.append(temp_dict)
def gen_random_list():
return_list = list()
for i in range(10):
return_list.append(random.random())
return return_list
def gen_index_list_for_beam(input_val_list, origin_data):
idx_list = list()
for one_val in input_val_list:
temp_idx = origin_data.index(one_val)
idx_list.append(temp_idx)
return idx_list
for i in range(sequence_length-1):
print("{}th sequence....".format(i))
limit_loop_num = len(result_list)
print(limit_loop_num)
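    # NOTE: result_list is never pruned back to beam_search_size, so the number
    # of tracked hypotheses triples each step (3, 9, 27, ...) instead of staying
    # fixed as it would in a standard beam search.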
for n in range(limit_loop_num):
one_dict = result_list[n]
origin_dict = copy.deepcopy(one_dict)
input_list = gen_random_list()
temp_val_list = gen_beam_list(input_list)
target_index_list = gen_index_list_for_beam(temp_val_list, input_list)
for j, one_idx in enumerate(target_index_list):
if j == 0:
one_dict['index_list'].append(one_idx)
one_dict['probability'] *= input_list[one_idx]
else:
temp_dict = copy.deepcopy(origin_dict)
temp_dict['index_list'].append(one_idx)
temp_dict['probability'] *= input_list[one_idx]
result_list.append(temp_dict)
def decide_result(input_full):
max_val = 0.
max_index = 0
for i, one_dict in enumerate(input_full):
if max_val < one_dict['probability']:
max_val = one_dict['probability']
max_index = i
return max_val, input_full[max_index]
result_val, result_seq = decide_result(result_list)
print("final result : {pro}, {sequence}".format(pro=result_val, sequence=result_seq['index_list']))
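
# --- illustrative alternative (not part of the original script) --------------
# heapq.nlargest performs the same "top beam_search_size values, descending"
# selection as gen_beam_list in a single call.
import heapq
print("top-k via heapq :", heapq.nlargest(beam_search_size, input_data))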
| [
"[email protected]"
] | |
d66e5f3155ea380201b01e010d93e3f7459d1d9b | 0de4549263e75a7614e7c430dac68822b18b0876 | /med1_med12_example/before_redefine/group.py | 8753e406ce81b7829d66cb354ff2fe1a00e8d5ae | [] | no_license | oaxiom/redefine_peaks | 7213630f021c69de5520300fe2121074159aa788 | c62abfc2702bca5172f58563ff7c478035c697f6 | refs/heads/master | 2020-07-30T14:10:11.671975 | 2019-09-26T02:17:22 | 2019-09-26T02:17:22 | 210,259,020 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 893 | py |
import glob
import os  # needed for os.path.split below
from glbase3 import *
config.draw_mode = ["png", 'pdf']
filenames = [os.path.split(f)[1] for f in glob.glob("../clus/*.bed")]
trks = [
flat_track(filename='../flats/esc_med1.flat'),
flat_track(filename='../flats/esc_med12.flat'),
]
peaks = [
genelist(filename="../peaks/esc_med1.rp1_summits.bed.gz", format=format.minimal_bed, gzip=True),
genelist(filename="../peaks/esc_med12.rp1_summits.bed.gz", format=format.minimal_bed, gzip=True),
]
gl = glglob()
ret = gl.chip_seq_cluster_heatmap(peaks, trks, "heatmap.png",
cache_data="data.bin", bracket=[9,14],
imshow=True, log=2,
pileup_distance=2000,
bins=50, read_extend=0)
gl.chip_seq_cluster_pileup(filename="clus/clusters.png")
for cid in ret:
print("cid:", cid, "len:", len(ret[cid]["genelist"]))
ret[cid]["genelist"].saveBED(filename="clus/cid_%s.bed" % cid, uniqueID=True)
| [
"[email protected]"
] | |
e7c60a0ec63f5654ae032c35925d7ffd99117de3 | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /tests/system/providers/apache/druid/example_druid_dag.py | 0552e10588950e78a6fc57488119b0e9bdf65845 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 1,992 | py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG to submit Apache Druid json index file using `DruidOperator`
"""
import os
from datetime import datetime
from airflow.models import DAG
from airflow.providers.apache.druid.operators.druid import DruidOperator
ENV_ID = os.environ.get("SYSTEM_TESTS_ENV_ID")
DAG_ID = "example_druid_operator"
with DAG(
dag_id=DAG_ID,
schedule_interval=None,
start_date=datetime(2021, 1, 1),
catchup=False,
tags=['example'],
) as dag:
# [START howto_operator_druid_submit]
submit_job = DruidOperator(task_id='spark_submit_job', json_index_file='json_index.json')
# Example content of json_index.json:
JSON_INDEX_STR = """
{
"type": "index_hadoop",
"datasource": "datasource_prd",
"spec": {
"dataSchema": {
"granularitySpec": {
"intervals": ["2021-09-01/2021-09-02"]
}
}
}
}
"""
# [END howto_operator_druid_submit]
from tests.system.utils import get_test_run # noqa: E402
# Needed to run the example DAG with pytest (see: tests/system/README.md#run_via_pytest)
test_run = get_test_run(dag)
| [
"[email protected]"
] | |
046821a28fcd0f8c07500a8e1002b4d2c26a518c | c7f43c4cc0ee84a5fe246b67f51e30b8d726ebd5 | /Competition/vision1/0203_8_private3.py | 5739dd3f67dbb144f198fea38d30aec58e244ef2 | [] | no_license | 89Mansions/AI_STUDY | d9f8bdf206f14ba41845a082e731ea844d3d9007 | d87c93355c949c462f96e85e8d0e186b0ce49c76 | refs/heads/master | 2023-07-21T19:11:23.539693 | 2021-08-30T08:18:59 | 2021-08-30T08:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,056 | py | # private 3등 코드
# train / test / validation (0.95) 분리
# batch_size = 16
# loss 줄었음, score 동일
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from keras.preprocessing.image import ImageDataGenerator
from numpy import expand_dims
from sklearn.model_selection import StratifiedKFold, cross_validate, train_test_split
from keras import Sequential
from keras.layers import *
from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from keras.optimizers import Adam
######################################################
# load the data
train = pd.read_csv('../data/DACON_vision1/train.csv')
print(train.shape) # (2048, 787)
sub = pd.read_csv('../data/DACON_vision1/submission.csv')
print(sub.shape) # (20480, 2)
test = pd.read_csv('../data/DACON_vision1/test.csv')
print(test.shape) # (20480, 786)
######################################################
#1. DATA
# print(train, test, sub)
# print(train['digit'].value_counts())  # digits 0 through 9
train2 = train.drop(['id', 'digit','letter'],1)
test2 = test.drop(['id','letter'],1) # >> x_pred
train2 = train2.values # >>> x
test2 = test2.values # >>> x_pred
# plt.imshow(train2[100].reshape(28,28))
# plt.show()
train2 = train2.reshape(-1,28,28,1)
test2 = test2.reshape(-1,28,28,1)
# preprocess
train2 = train2/255.0
test2 = test2/255.0
# ImageDataGenerator >> data augmentation: enlarging the dataset helps curb overfitting.
idg = ImageDataGenerator(height_shift_range=(-1,1),width_shift_range=(-1,1))
# width_shift_range : shifts the image left/right
# height_shift_range : shifts the image up/down
idg2 = ImageDataGenerator()
'''
sample_data = train2[100].copy()
sample = expand_dims(sample_data,0)
# expand_dims : 차원을 확장시킨다.
sample_datagen = ImageDataGenerator(height_shift_range=(-1,1),width_shift_range=(-1,1))
sample_generator = sample_datagen.flow(sample, batch_size=1) # flow : ImageDataGenerator 디버깅
plt.figure(figsize=(16,10))
for i in range(9) :
plt.subplot(3, 3, i+1)
sample_batch = sample_generator.next()
sample_image = sample_batch[0]
plt.imshow(sample_image.reshape(28, 28))
plt.show()
'''
# cross validation
skf = StratifiedKFold(n_splits=40, random_state=42, shuffle=True)
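# StratifiedKFold keeps the digit-class proportions the same in every fold,
# so each of the 40 splits trains and validates on a balanced label mix.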
#2. Modeling
# %%time
reLR = ReduceLROnPlateau(patience=100, verbose=1, factor=0.5)
es = EarlyStopping(patience=120, verbose=1)
val_loss_min = []
val_acc_max = []
result = 0
nth = 0
for train_index, test_index in skf.split(train2, train['digit']) : # >>> x, y
path = '../data/DACON_vision1/cp/0203_4_cp.hdf5'
mc = ModelCheckpoint(path, save_best_only=True, verbose=1)
x_train = train2[train_index]
x_test = train2[test_index]
y_train = train['digit'][train_index]
y_test = train['digit'][test_index]
x_train, x_valid, y_train, y_valid = train_test_split(x_train, y_train, train_size=0.95, shuffle=True, random_state=47)
train_generator = idg.flow(x_train, y_train, batch_size=16)
test_generator = idg2.flow(x_test, y_test, batch_size=16)
valid_generator = idg2.flow(x_valid, y_valid)
pred_generator = idg2.flow(test2, shuffle=False)
print(x_train.shape, x_test.shape, x_valid.shape) # (1896, 28, 28, 1) (52, 28, 28, 1) (100, 28, 28, 1)
print(y_train.shape, y_test.shape, y_valid.shape) # (1896,) (52,) (100,)
#2. Modeling
model = Sequential()
model.add(Conv2D(16, (3,3), activation='relu', input_shape=(28, 28,1), padding='same'))
model.add(BatchNormalization())
    # BatchNormalization >> normalizes activations with the mean and variance
    # of the input distribution estimated during training
model.add(Dropout(0.3))
model.add(Conv2D(32, (3,3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(32, (5, 5), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(32, (5, 5), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(32, (5, 5), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(3,3))
model.add(Dropout(0.3))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(Conv2D(64, (5, 5), activation='relu', padding='same'))
model.add(BatchNormalization())
model.add(MaxPooling2D(3,3))
model.add(Dropout(0.3))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu'))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
#3. Compile, Train
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam(lr=0.002, epsilon=None), metrics=['acc'])
    # epsilon: small constant guarding against division by zero in Adam's update
learning_hist = model.fit_generator(train_generator, epochs=1000, validation_data=valid_generator, callbacks=[es, mc, reLR] )
model.load_weights('../data/DACON_vision1/cp/0203_4_cp.hdf5')
#4. Evaluate, Predict
loss, acc = model.evaluate(test_generator)
print("loss : ", loss)
print("acc : ", acc)
result += model.predict_generator(pred_generator, verbose=True)/40
# save val_loss
hist = pd.DataFrame(learning_hist.history)
val_loss_min.append(hist['val_loss'].min())
val_acc_max.append(hist['val_acc'].max())
nth += 1
    print("Completed training fold", nth)
print("val_loss_min :", np.mean(val_loss_min)) # val_loss_mean : 0.1835539501160383
print("val_acc_max :", np.mean(val_acc_max)) # val_acc_max : 0.9512500002980232
model.summary()
sub['digit'] = result.argmax(1)
print(sub)
sub.to_csv('../data/DACON_vision1/0203_4_private3.csv', index=False)
# xian submission 0203_4_pca
# score 0.9509803922
| [
"[email protected]"
] | |
ac7aa470914c9dcd181660f70288c52307f34e56 | 17889c693624186593a64bb2220035760316980b | /setup.py | bbc213031ca0fa6fb214a151fa3d269b11f6787b | [] | no_license | vahtras/pdpack | f0b50931ce7c987c8f6a151b8529f2175f6de990 | 5fe9c377ebadaff82e5db0650db60709a8065720 | refs/heads/master | 2021-01-10T02:08:53.424347 | 2016-04-08T08:54:34 | 2016-04-08T08:54:34 | 55,702,170 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | try:
import numpy
except ImportError:
import subprocess
subprocess.call("pip install numpy", shell=True)
from numpy.distutils.core import setup, Extension
ext = Extension(
name='linextra',
sources=['pdpack/linextra.F', 'pdpack/linpack.F'],
include_dirs=['pdpack/include'],
libraries=['blas']
)
setup(
name='linextra',
ext_modules=[ext]
)
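
# Typical invocation (sketch; numpy.distutils drives the Fortran build, so a
# Fortran compiler such as gfortran is assumed to be available):
#   python setup.py build_ext --inplace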
| [
"[email protected]"
] | |
69be95888037623078a6dcbdfe8f36e773a65944 | d3e31f6b8da5c1a7310b543bbf2adc76091b5571 | /Day24/vd1/app.py | 563c66af2f3efd4d2c19b03b41765d7162a81322 | [] | no_license | pytutorial/py2103 | 224a5a7133dbe03fc4f798408694bf664be10613 | adbd9eb5a32eb1d28b747dcfbe90ab8a3470e5de | refs/heads/main | 2023-07-14T06:31:18.918778 | 2021-08-12T14:29:16 | 2021-08-12T14:29:16 | 355,163,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | #pip install flask
from flask import Flask, render_template
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html') #'Hello'
app.run(debug=True) | [
"[email protected]"
] | |
d7ef042d180c534b37eea101e0c194f1a9ff0e20 | 5d2bc0efb0e457cfd55a90d9754d5ced9c009cae | /venv/lib/python2.7/site-packages/tests/test_024_ForeignKeys.py | d1e1d2981a54530173a85a1af5462ba6c56c0d9f | [] | no_license | michaelp1212/paxton | dafe08eca55557d036189d5242e47e89ec15bf2d | 0bd1da471c3a594c0765a4bc5cd1288404791caf | refs/heads/master | 2021-03-25T07:17:06.523340 | 2020-03-19T01:38:24 | 2020-03-19T01:38:24 | 247,598,121 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,878 | py | #
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2007-2008
#
# NOTE: IDS requires that you pass the schema name (cannot pass None)
from __future__ import print_function
import sys
import unittest
import ibm_db
import config
from testfunctions import IbmDbTestFunctions
class IbmDbTestCase(unittest.TestCase):
def test_024_ForeignKeys(self):
obj = IbmDbTestFunctions()
obj.assert_expect(self.run_test_024)
def run_test_024(self):
conn = ibm_db.connect(config.database, config.user, config.password)
server = ibm_db.server_info( conn )
if conn != 0:
drop = 'DROP TABLE test_primary_keys'
try:
result = ibm_db.exec_immediate(conn, drop)
except:
pass
drop = 'DROP TABLE test_keys'
try:
result = ibm_db.exec_immediate(conn, drop)
except:
pass
drop = 'DROP TABLE test_foreign_keys'
try:
result = ibm_db.exec_immediate(conn, drop)
except:
pass
statement = 'CREATE TABLE test_primary_keys (id INTEGER NOT NULL, PRIMARY KEY(id))'
result = ibm_db.exec_immediate(conn, statement)
statement = "INSERT INTO test_primary_keys VALUES (1)"
result = ibm_db.exec_immediate(conn, statement)
statement = 'CREATE TABLE test_keys (name VARCHAR(30) NOT NULL, idf INTEGER NOT NULL, FOREIGN KEY(idf) REFERENCES test_primary_keys(id), \
PRIMARY KEY(name))'
result = ibm_db.exec_immediate(conn, statement)
statement = "INSERT INTO test_keys VALUES ('vince', 1)"
result = ibm_db.exec_immediate(conn, statement)
statement = 'CREATE TABLE test_foreign_keys (namef VARCHAR(30) NOT NULL, id INTEGER NOT NULL, FOREIGN KEY(namef) REFERENCES test_keys(name))'
result = ibm_db.exec_immediate(conn, statement)
statement = "INSERT INTO test_foreign_keys VALUES ('vince', 1)"
result = ibm_db.exec_immediate(conn, statement)
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_primary_keys')
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_PRIMARY_KEYS')
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, None, None, None, config.user, 'test_keys')
else:
stmt = ibm_db.foreign_keys(conn, None, None, None, None, None, 'TEST_KEYS')
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_keys', None, None, None)
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_KEYS', None, None, None)
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_keys', None, config.user, 'test_foreign_keys')
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_KEYS', None, None, 'TEST_FOREIGN_KEYS')
row = ibm_db.fetch_tuple(stmt)
print(row[2])
print(row[3])
print(row[6])
print(row[7])
try:
stmt = ibm_db.foreign_keys(conn, None, None, None, None, None, None)
row = ibm_db.fetch_tuple(stmt)
except:
if (not stmt):
print(ibm_db.stmt_errormsg())
if (server.DBMS_NAME[0:3] == 'IDS'):
stmt = ibm_db.foreign_keys(conn, None, config.user, 'test_keys', None, 'dummy_schema')
else:
stmt = ibm_db.foreign_keys(conn, None, None, 'TEST_KEYS', None, 'dummy_schema')
row = ibm_db.fetch_tuple(stmt)
if(not row):
print("No Data Found")
else:
print(row)
ibm_db.close(conn)
else:
print(ibm_db.conn_errormsg())
print("Connection failed\n")
#__END__
#__LUW_EXPECTED__
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#No Data Found
#__ZOS_EXPECTED__
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#No Data Found
#__SYSTEMI_EXPECTED__
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_PRIMARY_KEYS
#ID
#TEST_KEYS
#IDF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#TEST_KEYS
#NAME
#TEST_FOREIGN_KEYS
#NAMEF
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#__IDS_EXPECTED__
#test_primary_keys
#id
#test_keys
#idf
#test_primary_keys
#id
#test_keys
#idf
#test_keys
#name
#test_foreign_keys
#namef
#test_keys
#name
#test_foreign_keys
#namef
#[IBM][CLI Driver] CLI0124E Invalid argument value. SQLSTATE=HY009 SQLCODE=-99999
#No Data Found
| [
"[email protected]"
] | |
eb4b3921a93a20d061cc44081c98fc02a0a9321e | 3bc4b502fdb5ffecdbecc9239a0c25746dc31022 | /Ch03/p69.py | c164e5529ca7a074b8d57aedb66553a6e4e56930 | [] | no_license | pkc-3/python | 68da873bbe7ad9a3e0db4e22ddaa412a9377720f | d8410d897c3784c6017f7edc215ce8763e557518 | refs/heads/master | 2023-05-31T06:40:30.279748 | 2021-06-10T09:00:09 | 2021-06-10T09:00:09 | 361,634,592 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 137 | py | # break, continue 예
i = 0
while i < 10:
i += 1
if i == 3:
continue
if i == 6:
break
print(i, end=' ') | [
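
# expected output: 1 2 4 5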
"[email protected]"
] | |
712fdaba5b3b039c11c0a907327177ff5bd9909d | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/1219692/snippet.py | 72feef967aeb0d54cf1d96dad510906647484037 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 1,383 | py | #!/usr/bin/env python
"""
This script looks up how many followers two different Twitter accounts do have in common.
Usage:
twitter_follower_intersect.py username username
You'll need Python and the Python Twitter Tools to get this running.
http://pypi.python.org/pypi/twitter/
Also you will have to create an app at https://dev.twitter.com/apps/
and enter your credentials below:
"""
auth_token = '...'
auth_token_secret = '...'
consumer_key = '...'
consumer_secret = '...'
from twitter import Twitter, OAuth
import sys, os
if len(sys.argv) != 3:
    print 'Usage:\n\n'+os.path.basename(sys.argv[0])+' screenname1 screenname2'
    sys.exit(1)  # the original fell through and crashed on sys.argv[1]
t = Twitter(auth=OAuth(auth_token, auth_token_secret, consumer_key, consumer_secret))
user_a = sys.argv[1]
user_b = sys.argv[2]
a = t.followers.ids(user=user_a)
b = t.followers.ids(user=user_b)
# keep only the IDs that appear in both follower lists; a set makes the
# membership test O(1) (the original scanned the list with index() inside
# a bare try/except)
b_set = set(b)
c = [follower_id for follower_id in a if follower_id in b_set]
print '\n'+user_a, 'has', len(a), 'follower'
print user_b, 'has', len(b), 'follower'
print user_a, 'and', user_b, 'have', len(c), 'followers in common'
if len(c) > 100:
c = c[:100]
print '\nfirst 100 common followers are:'
elif len(c) > 0:
print '\nthese are the common followers:'
if len(c) > 0:
common_info = t.users.lookup(user_id=','.join(map(str, c)))
common = []
for u in common_info:
common.append(u['screen_name'])
print ', '.join(common)
print | [
"[email protected]"
] | |
79807977daf7baeaa71f2944fdd760df505aaf6f | bcabce262e54a6ac38948a4717254cdc3ce65874 | /mealpy/physics_based/NRO.py | 50aba440638bb516c9bae21d4ad0b3dca935f137 | [
"MIT"
] | permissive | ibrahim85/MEta-heuristics-ALgorithms-in-PYthon | 4ab6e6ef54127b6f4721178a1f855d1be91f9b42 | 47fb428e8378fc52cd5fe6eff20cec1c68ba5039 | refs/heads/master | 2023-06-03T05:23:31.993100 | 2021-06-28T14:48:38 | 2021-06-28T14:48:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,908 | py | #!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 07:02, 18/03/2020 %
# %
# Email: [email protected] %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy import sin, abs, sqrt, pi, subtract, array, exp
from numpy import log as loge
from numpy.random import uniform, normal, choice, rand
from numpy.linalg import norm
from copy import deepcopy
from math import gamma
from scipy.stats import rankdata
from mealpy.root import Root
class BaseNRO(Root):
"""
The original version of: Nuclear Reaction Optimization (NRO)
An Approach Inspired from Nuclear Reaction Processes for Numerical Optimization
Nuclear Reaction Optimization: A novel and powerful physics-based algorithm for global optimization
Link:
https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8720256
"""
def __init__(self, obj_func=None, lb=None, ub=None, verbose=True, epoch=750, pop_size=100, **kwargs):
super().__init__(obj_func, lb, ub, verbose, kwargs)
self.epoch = epoch
self.pop_size = pop_size
def train(self):
pop = [self.create_solution(minmax=self.ID_MIN_PROB) for _ in range(self.pop_size)]
g_best = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MIN_PROB)
for epoch in range(self.epoch):
xichma_v = 1
xichma_u = ((gamma(1 + 1.5) * sin(pi * 1.5 / 2)) / (gamma((1 + 1.5) / 2) * 1.5 * 2 ** ((1.5 - 1) / 2))) ** (1.0 / 1.5)
levy_b = (normal(0, xichma_u ** 2)) / (sqrt(abs(normal(0, xichma_v ** 2))) ** (1.0 / 1.5))
# NFi phase
Pb = uniform()
Pfi = uniform()
freq = 0.05
alpha = 0.01
for i in range(self.pop_size):
## Calculate neutron vector Nei by Eq. (2)
## Random 1 more index to select neutron
temp1 = list(set(range(0, self.pop_size)) - {i})
i1 = choice(temp1, replace=False)
Nei = (pop[i][self.ID_POS] + pop[i1][self.ID_POS]) / 2
## Update population of fission products according to Eq.(3), (6) or (9);
if uniform() <= Pfi:
### Update based on Eq. 3
if uniform() <= Pb:
xichma1 = (loge(epoch + 1) * 1.0 / (epoch+1)) * abs( subtract(pop[i][self.ID_POS], g_best[self.ID_POS]))
gauss = array([normal(g_best[self.ID_POS][j], xichma1[j]) for j in range(self.problem_size)])
Xi = gauss + uniform() * g_best[self.ID_POS] - round(rand() + 1)*Nei
### Update based on Eq. 6
else:
i2 = choice(temp1, replace=False)
xichma2 = (loge(epoch + 1) * 1.0 / (epoch+1)) * abs( subtract(pop[i2][self.ID_POS], g_best[self.ID_POS]))
gauss = array([normal(pop[i][self.ID_POS][j], xichma2[j]) for j in range(self.problem_size)])
Xi = gauss + uniform() * g_best[self.ID_POS] - round(rand() + 2) * Nei
## Update based on Eq. 9
else:
i3 = choice(temp1, replace=False)
xichma2 = (loge(epoch + 1) * 1.0 / (epoch+1)) * abs( subtract(pop[i3][self.ID_POS], g_best[self.ID_POS]))
Xi = array([normal(pop[i][self.ID_POS][j], xichma2[j]) for j in range(self.problem_size)])
## Check the boundary and evaluate the fitness function
Xi = self.amend_position_random_faster(Xi)
fit = self.get_fitness_position(Xi, self.ID_MIN_PROB)
if fit < pop[i][self.ID_FIT]:
pop[i] = [Xi, fit]
if fit < g_best[self.ID_FIT]:
g_best = [Xi, fit]
# NFu phase
## Ionization stage
## Calculate the Pa through Eq. (10);
ranked_pop = rankdata([pop[i][self.ID_FIT] for i in range(self.pop_size)])
for i in range(self.pop_size):
X_ion = deepcopy(pop[i][self.ID_POS])
if (ranked_pop[i] * 1.0 / self.pop_size) < uniform():
i1, i2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
for j in range(self.problem_size):
#### Levy flight strategy is described as Eq. 18
if pop[i2][self.ID_POS][j] == pop[i][self.ID_POS][j]:
X_ion[j] = pop[i][self.ID_POS][j] + alpha * levy_b * (pop[i][self.ID_POS][j] - g_best[self.ID_POS][j])
#### If not, based on Eq. 11, 12
else:
if uniform() <= 0.5:
X_ion[j] = pop[i1][self.ID_POS][j] + uniform() * (pop[i2][self.ID_POS][j] - pop[i][self.ID_POS][j])
else:
X_ion[j] = pop[i1][self.ID_POS][j] - uniform() * (pop[i2][self.ID_POS][j] - pop[i][self.ID_POS][j])
else: #### Levy flight strategy is described as Eq. 21
X_worst = self.get_global_best_solution(pop, self.ID_FIT, self.ID_MAX_PROB)
for j in range(self.problem_size):
##### Based on Eq. 21
if X_worst[self.ID_POS][j] == g_best[self.ID_POS][j]:
X_ion[j] = pop[i][self.ID_POS][j] + alpha * levy_b * (self.ub[j] - self.lb[j])
##### Based on Eq. 13
else:
X_ion[j] = pop[i][self.ID_POS][j] + round(uniform()) * uniform()*(X_worst[self.ID_POS][j] - g_best[self.ID_POS][j])
## Check the boundary and evaluate the fitness function for X_ion
X_ion = self.amend_position_random_faster(X_ion)
fit = self.get_fitness_position(X_ion, self.ID_MIN_PROB)
if fit < pop[i][self.ID_FIT]:
pop[i] = [X_ion, fit]
if fit < g_best[self.ID_FIT]:
g_best = [X_ion, fit]
## Fusion Stage
### all ions obtained from ionization are ranked based on (14) - Calculate the Pc through Eq. (14)
ranked_pop = rankdata([pop[i][self.ID_FIT] for i in range(self.pop_size)])
for i in range(self.pop_size):
i1, i2 = choice(list(set(range(0, self.pop_size)) - {i}), 2, replace=False)
#### Generate fusion nucleus
if (ranked_pop[i] * 1.0 / self.pop_size) < uniform():
t1 = uniform() * (pop[i1][self.ID_POS] - g_best[self.ID_POS])
t2 = uniform() * (pop[i2][self.ID_POS] - g_best[self.ID_POS])
temp2 = pop[i1][self.ID_POS] - pop[i2][self.ID_POS]
X_fu = pop[i][self.ID_POS] + t1 + t2 - exp(-norm(temp2)) * temp2
#### Else
else:
##### Based on Eq. 22
check_equal = (pop[i1][self.ID_POS] == pop[i2][self.ID_POS])
if check_equal.all():
X_fu = pop[i][self.ID_POS] + alpha * levy_b * (pop[i][self.ID_POS] - g_best[self.ID_POS])
##### Based on Eq. 16, 17
else:
if uniform() > 0.5:
X_fu = pop[i][self.ID_POS] - 0.5*(sin(2*pi*freq*epoch + pi)*(self.epoch - epoch)/self.epoch + 1)*(pop[i1][self.ID_POS] - pop[i2][self.ID_POS])
else:
X_fu = pop[i][self.ID_POS] - 0.5 * (sin(2 * pi * freq * epoch + pi) * epoch / self.epoch + 1) * (pop[i1][self.ID_POS] - pop[i2][self.ID_POS])
X_fu = self.amend_position_random_faster(X_fu)
fit = self.get_fitness_position(X_fu, self.ID_MIN_PROB)
if fit < pop[i][self.ID_FIT]:
pop[i] = [X_fu, fit]
if fit < g_best[self.ID_FIT]:
g_best = [X_fu, fit]
self.loss_train.append(g_best[self.ID_FIT])
if self.verbose:
print(">Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
self.solution = g_best
return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train | [
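

# --- Example usage (illustrative sketch, not part of the original file) ------
# It assumes mealpy's Root base class accepts (obj_func, lb, ub, verbose) as
# the __init__ above suggests; the parameter values here are arbitrary.
if __name__ == "__main__":
    def sphere(solution):
        # simple convex test function with its minimum at the origin
        return sum(x ** 2 for x in solution)

    optimizer = BaseNRO(obj_func=sphere, lb=[-10] * 5, ub=[10] * 5,
                        verbose=True, epoch=50, pop_size=20)
    best_pos, best_fit, loss_history = optimizer.train()
    print("best fitness:", best_fit)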
"[email protected]"
] | |
a25550e858dae0bcc5b99adae2e16dcac185d00c | 6e57bdc0a6cd18f9f546559875256c4570256c45 | /external/toolchain-utils/build_tool.py | 3bd357c084263497145de778971668962d3eed1d | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dongdong331/test | 969d6e945f7f21a5819cd1d5f536d12c552e825c | 2ba7bcea4f9d9715cbb1c4e69271f7b185a0786e | refs/heads/master | 2023-03-07T06:56:55.210503 | 2020-12-07T04:15:33 | 2020-12-07T04:15:33 | 134,398,935 | 2 | 1 | null | 2022-11-21T07:53:41 | 2018-05-22T10:26:42 | null | UTF-8 | Python | false | false | 31,006 | py | #!/usr/bin/env python2
"""Script to bootstrap the chroot using new toolchain.
This script allows you to build/install a customized version of gcc/binutils,
either by specifying branch or a local directory.
This script must be executed outside chroot.
Below is some typical usage -
## Build gcc located at /local/gcc/dir and do a bootstrap using the new
## compiler for the chromeos root. The script tries to find a valid chromeos
## tree all the way up from your current working directory.
./build_tool.py --gcc_dir=/loca/gcc/dir --bootstrap
## Build binutils, using remote branch "mobile_toolchain_v17" and do a
## bootstrap using the new binutils for the chromeos root. The script tries to
## find a valid chromeos tree all the way up from your current working
## directory.
./build_tool.py --binutils_branch=cros/mobile_toolchain_v17 \
--chromeos_root=/chromeos/dir --bootstrap
## Same as above except only do it for board daisy - no bootstrapping involved.
./build_tool.py --binutils_branch=cros/mobile_toolchain_v16 \
--chromeos_root=/chromeos/dir --board=daisy
"""
from __future__ import print_function
__author__ = '[email protected] (Han Shen)'
import argparse
import os
import re
import sys
from cros_utils import command_executer
from cros_utils import logger
from cros_utils import misc
import repo_to_repo
REPO_PATH_PATTERN = 'src/third_party/{0}'
TEMP_BRANCH_NAME = 'internal_testing_branch_no_use'
CHROMIUMOS_OVERLAY_PATH = 'src/third_party/chromiumos-overlay'
EBUILD_PATH_PATTERN = 'src/third_party/chromiumos-overlay/sys-devel/{0}'
class Bootstrapper(object):
"""Class that handles bootstrap process."""
def __init__(self,
chromeos_root,
ndk_dir,
gcc_branch=None,
gcc_dir=None,
binutils_branch=None,
binutils_dir=None,
board=None,
disable_2nd_bootstrap=False,
setup_tool_ebuild_file_only=False):
self._chromeos_root = chromeos_root
self._ndk_dir = ndk_dir
self._gcc_branch = gcc_branch
self._gcc_branch_tree = None
self._gcc_dir = gcc_dir
self._gcc_ebuild_file = None
self._gcc_ebuild_file_name = None
self._binutils_branch = binutils_branch
self._binutils_branch_tree = None
self._binutils_dir = binutils_dir
self._binutils_ebuild_file = None
self._binutils_ebuild_file_name = None
self._setup_tool_ebuild_file_only = setup_tool_ebuild_file_only
self._ce = command_executer.GetCommandExecuter()
self._logger = logger.GetLogger()
self._board = board
self._disable_2nd_bootstrap = disable_2nd_bootstrap
def IsTreeSame(self, t1, t2):
diff = 'diff -qr -x .git -x .svn "{0}" "{1}"'.format(t1, t2)
if self._ce.RunCommand(diff, print_to_console=False) == 0:
self._logger.LogOutput('"{0}" and "{1}" are the same."'.format(t1, t2))
return True
self._logger.LogWarning('"{0}" and "{1}" are different."'.format(t1, t2))
return False
def SubmitToLocalBranch(self):
"""Copy source code to the chromium source tree and submit it locally."""
if self._gcc_dir:
if not self.SubmitToolToLocalBranch(
tool_name='gcc', tool_dir=self._gcc_dir):
return False
self._gcc_branch = TEMP_BRANCH_NAME
if self._binutils_dir:
if not self.SubmitToolToLocalBranch(
tool_name='binutils', tool_dir=self._binutils_dir):
return False
self._binutils_branch = TEMP_BRANCH_NAME
return True
def SubmitToolToLocalBranch(self, tool_name, tool_dir):
"""Copy the source code to local chromium source tree.
Args:
tool_name: either 'gcc' or 'binutils'
tool_dir: the tool source dir to be used
Returns:
True if all succeeded False otherwise.
"""
# The next few steps creates an internal branch to sync with the tool dir
# user provided.
chrome_tool_dir = self.GetChromeOsToolDir(tool_name)
# 0. Test to see if git tree is free of local changes.
if not misc.IsGitTreeClean(chrome_tool_dir):
self._logger.LogError(
'Git repository "{0}" not clean, aborted.'.format(chrome_tool_dir))
return False
# 1. Checkout/create a (new) branch for testing.
command = 'cd "{0}" && git checkout -B {1}'.format(chrome_tool_dir,
TEMP_BRANCH_NAME)
ret = self._ce.RunCommand(command)
if ret:
self._logger.LogError('Failed to create a temp branch for test, aborted.')
return False
if self.IsTreeSame(tool_dir, chrome_tool_dir):
self._logger.LogOutput('"{0}" and "{1}" are the same, sync skipped.'.
format(tool_dir, chrome_tool_dir))
return True
# 2. Sync sources from user provided tool dir to chromiumos tool git.
local_tool_repo = repo_to_repo.FileRepo(tool_dir)
chrome_tool_repo = repo_to_repo.GitRepo(chrome_tool_dir, TEMP_BRANCH_NAME)
chrome_tool_repo.SetRoot(chrome_tool_dir)
# Delete all stuff except '.git' before start mapping.
self._ce.RunCommand(
'cd {0} && find . -maxdepth 1 -not -name ".git" -not -name "." '
r'\( -type f -exec rm {{}} \; -o '
r' -type d -exec rm -fr {{}} \; \)'.format(chrome_tool_dir))
local_tool_repo.MapSources(chrome_tool_repo.GetRoot())
# 3. Ensure after sync tree is the same.
if self.IsTreeSame(tool_dir, chrome_tool_dir):
self._logger.LogOutput('Sync successfully done.')
else:
self._logger.LogError('Sync not successful, aborted.')
return False
# 4. Commit all changes.
# 4.1 Try to get some information about the tool dir we are using.
cmd = 'cd {0} && git log -1 --pretty=oneline'.format(tool_dir)
tool_dir_extra_info = None
ret, tool_dir_extra_info, _ = self._ce.RunCommandWOutput(
cmd, print_to_console=False)
commit_message = 'Synced with tool source tree at - "{0}".'.format(tool_dir)
if not ret:
commit_message += '\nGit log for {0}:\n{1}'.format(
tool_dir, tool_dir_extra_info.strip())
if chrome_tool_repo.CommitLocally(commit_message):
self._logger.LogError('Commit to local branch "{0}" failed, aborted.'.
format(TEMP_BRANCH_NAME))
return False
return True
def CheckoutBranch(self):
"""Checkout working branch for the tools.
Returns:
True: if operation succeeds.
"""
if self._gcc_branch:
rv = self.CheckoutToolBranch('gcc', self._gcc_branch)
if rv:
self._gcc_branch_tree = rv
else:
return False
if self._binutils_branch:
rv = self.CheckoutToolBranch('binutils', self._binutils_branch)
if rv:
self._binutils_branch_tree = rv
else:
return False
return True
def CheckoutToolBranch(self, tool_name, tool_branch):
"""Checkout the tool branch for a certain tool.
Args:
tool_name: either 'gcc' or 'binutils'
tool_branch: tool branch to use
Returns:
True: if operation succeeds. Otherwise False.
"""
chrome_tool_dir = self.GetChromeOsToolDir(tool_name)
command = 'cd "{0}" && git checkout {1}'.format(chrome_tool_dir,
tool_branch)
if not self._ce.RunCommand(command, print_to_console=True):
# Get 'TREE' value of this commit
command = ('cd "{0}" && git cat-file -p {1} '
'| grep -E "^tree [a-f0-9]+$" '
'| cut -d" " -f2').format(chrome_tool_dir, tool_branch)
ret, stdout, _ = self._ce.RunCommandWOutput(
command, print_to_console=False)
# Pipe operation always has a zero return value. So need to check if
# stdout is valid.
if not ret and stdout and re.match('[0-9a-h]{40}',
stdout.strip(), re.IGNORECASE):
tool_branch_tree = stdout.strip()
self._logger.LogOutput('Find tree for {0} branch "{1}" - "{2}"'.format(
tool_name, tool_branch, tool_branch_tree))
return tool_branch_tree
self._logger.LogError(('Failed to checkout "{0}" or failed to '
'get tree value, aborted.').format(tool_branch))
return None
def FindEbuildFile(self):
"""Find the ebuild files for the tools.
Returns:
True: if operation succeeds.
"""
if self._gcc_branch:
(rv, ef, efn) = self.FindToolEbuildFile('gcc')
if rv:
self._gcc_ebuild_file = ef
self._gcc_ebuild_file_name = efn
else:
return False
if self._binutils_branch:
(rv, ef, efn) = self.FindToolEbuildFile('binutils')
if rv:
self._binutils_ebuild_file = ef
self._binutils_ebuild_file_name = efn
else:
return False
return True
def FindToolEbuildFile(self, tool_name):
"""Find ebuild file for a specific tool.
Args:
tool_name: either "gcc" or "binutils".
Returns:
      A triplet that consists of whether the operation succeeds,
      the tool ebuild file full path, and the tool ebuild file name.
"""
# To get the active gcc ebuild file, we need a workable chroot first.
if not os.path.exists(
os.path.join(self._chromeos_root, 'chroot')) and self._ce.RunCommand(
'cd "{0}" && cros_sdk --create'.format(self._chromeos_root)):
      self._logger.LogError(('Failed to install an initial chroot, aborted.\n'
'If previous bootstrap failed, do a '
'"cros_sdk --delete" to remove '
'in-complete chroot.'))
return (False, None, None)
rv, stdout, _ = self._ce.ChrootRunCommandWOutput(
self._chromeos_root,
'equery w sys-devel/{0}'.format(tool_name),
print_to_console=True)
if rv:
self._logger.LogError(
('Failed to execute inside chroot '
'"equery w sys-devel/{0}", aborted.').format(tool_name))
return (False, None, None)
m = re.match(r'^.*/({0}/(.*\.ebuild))$'.format(
EBUILD_PATH_PATTERN.format(tool_name)), stdout)
if not m:
self._logger.LogError(
('Failed to find {0} ebuild file, aborted. '
'If previous bootstrap failed, do a "cros_sdk --delete" to remove '
'in-complete chroot.').format(tool_name))
return (False, None, None)
tool_ebuild_file = os.path.join(self._chromeos_root, m.group(1))
tool_ebuild_file_name = m.group(2)
return (True, tool_ebuild_file, tool_ebuild_file_name)
def InplaceModifyEbuildFile(self):
"""Modify the ebuild file.
Returns:
True if operation succeeds.
"""
# Note we shall not use remote branch name (eg. "cros/gcc.gnu.org/...") in
# CROS_WORKON_COMMIT, we have to use GITHASH. So we call GitGetCommitHash on
# tool_branch.
tool = None
toolbranch = None
if self._gcc_branch:
tool = 'gcc'
toolbranch = self._gcc_branch
tooltree = self._gcc_branch_tree
toolebuild = self._gcc_ebuild_file
elif self._binutils_branch:
tool = 'binutils'
toolbranch = self._binutils_branch
tooltree = self._binutils_branch_tree
toolebuild = self._binutils_ebuild_file
assert tool
# An example for the following variables would be:
# tooldir = '~/android/master-ndk/toolchain/gcc/gcc-4.9'
# tool_branch_githash = xxxxx
# toolcomponents = toolchain/gcc
tooldir = self.GetChromeOsToolDir(tool)
toolgithash = misc.GitGetCommitHash(tooldir, toolbranch)
if not toolgithash:
return False
toolcomponents = 'toolchain/{}'.format(tool)
return self.InplaceModifyToolEbuildFile(toolcomponents, toolgithash,
tooltree, toolebuild)
@staticmethod
def ResetToolEbuildFile(chromeos_root, tool_name):
"""Reset tool ebuild file to clean state.
Args:
chromeos_root: chromeos source tree
tool_name: either "gcc" or "binutils"
Returns:
True if operation succeds.
"""
rv = misc.GetGitChangesAsList(
os.path.join(chromeos_root, CHROMIUMOS_OVERLAY_PATH),
path=('sys-devel/{0}/{0}-*.ebuild'.format(tool_name)),
staged=False)
if rv:
cmd = 'cd {0} && git checkout --'.format(
os.path.join(chromeos_root, CHROMIUMOS_OVERLAY_PATH))
for g in rv:
cmd += ' ' + g
rv = command_executer.GetCommandExecuter().RunCommand(cmd)
if rv:
logger.GetLogger().LogWarning(
'Failed to reset the ebuild file. Please refer to log above.')
return False
else:
logger.GetLogger().LogWarning(
'Note - did not find any modified {0} ebuild file.'.format(tool_name))
# Fall through
return True
def GetChromeOsToolDir(self, tool_name):
"""Return the chromeos git dir for a specific tool.
Note, after we unified ChromeOs and Android, the tool dir is under
ndk_dir/toolchain/[gcc,binutils].
Args:
tool_name: either 'gcc' or 'binutils'.
Returns:
Absolute git path for the tool.
"""
tool_toppath = os.path.join(self._ndk_dir, 'toolchain', tool_name)
# There may be sub-directories like 'binutils-2.25', 'binutils-2.24',
# 'gcc-4.9', 'gcc-4.8', etc. find the newest binutils version.
cmd = ('find {} -maxdepth 1 -type d -name "{}-*" '
'| sort -r | head -1').format(tool_toppath, tool_name)
rv, out, _ = self._ce.RunCommandWOutput(cmd, print_to_console=False)
if rv:
return None
repo = out.strip()
# cros-workon eclass expects every CROS_WORKON_PROJECT ends with ".git".
self._ce.RunCommand(('cd $(dirname {0}) && '
'ln -sf $(basename {0}) $(basename {0}).git').format(
repo, print_to_console=True))
return repo
def InplaceModifyToolEbuildFile(self, tool_components, tool_branch_githash,
tool_branch_tree, tool_ebuild_file):
"""Using sed to fill properly values into the ebuild file.
Args:
tool_components: either "toolchain/gcc" or "toolchain/binutils"
tool_branch_githash: githash for tool_branch
tool_branch_tree: treeish for the tool branch
tool_ebuild_file: tool ebuild file
Returns:
True: if operation succeeded.
"""
command = ('sed -i '
'-e \'/^CROS_WORKON_REPO=".*"/i'
' # The following line is modified by script.\' '
'-e \'s!^CROS_WORKON_REPO=".*"$!CROS_WORKON_REPO="{0}"!\' '
'-e \'/^CROS_WORKON_PROJECT=".*"/i'
' # The following line is modified by script.\' '
'-e \'s!^CROS_WORKON_PROJECT=.*$!CROS_WORKON_PROJECT="{1}"!\' '
'-e \'/^CROS_WORKON_COMMIT=".*"/i'
' # The following line is modified by script.\' '
'-e \'s!^CROS_WORKON_COMMIT=".*"$!CROS_WORKON_COMMIT="{2}"!\' '
'-e \'/^CROS_WORKON_TREE=".*"/i'
' # The following line is modified by script.\' '
'-e \'s!^CROS_WORKON_TREE=".*"$!CROS_WORKON_TREE="{3}"!\' '
'{4}').format('/home/{}/ndk-root'.format(os.environ['USER']),
tool_components, tool_branch_githash,
tool_branch_tree, tool_ebuild_file)
rv = self._ce.RunCommand(command)
if rv:
self._logger.LogError(
'Failed to modify commit and tree value for "{0}"", aborted.'.format(
tool_ebuild_file))
return False
# Warn that the ebuild file has been modified.
self._logger.LogWarning(
('Ebuild file "{0}" is modified, to revert the file - \n'
'bootstrap_compiler.py --chromeos_root={1} '
'--reset_tool_ebuild_file').format(tool_ebuild_file,
self._chromeos_root))
return True
def DoBuildForBoard(self):
"""Build tool for a specific board.
Returns:
True if operation succeeds.
"""
if self._gcc_branch:
if not self.DoBuildToolForBoard('gcc'):
return False
if self._binutils_branch:
if not self.DoBuildToolForBoard('binutils'):
return False
return True
def DoBuildToolForBoard(self, tool_name):
"""Build a specific tool for a specific board.
Args:
tool_name: either "gcc" or "binutils"
Returns:
True if operation succeeds.
"""
chroot_ndk_root = os.path.join(self._chromeos_root, 'chroot', 'home',
os.environ['USER'], 'ndk-root')
self._ce.RunCommand('mkdir -p {}'.format(chroot_ndk_root))
if self._ce.RunCommand(
'sudo mount --bind {} {}'.format(self._ndk_dir, chroot_ndk_root)):
self._logger.LogError('Failed to mount ndk dir into chroot')
return False
try:
boards_to_build = self._board.split(',')
target_built = set()
failed = []
for board in boards_to_build:
if board == 'host':
command = 'sudo emerge sys-devel/{0}'.format(tool_name)
else:
target = misc.GetCtargetFromBoard(board, self._chromeos_root)
if not target:
self._logger.LogError(
'Unsupported board "{0}", skip.'.format(board))
failed.append(board)
continue
# Skip this board if we have already built for a board that has the
# same target.
if target in target_built:
self._logger.LogWarning(
'Skipping toolchain for board "{}"'.format(board))
continue
target_built.add(target)
command = 'sudo emerge cross-{0}/{1}'.format(target, tool_name)
rv = self._ce.ChrootRunCommand(
self._chromeos_root, command, print_to_console=True)
if rv:
self._logger.LogError(
'Build {0} failed for {1}, aborted.'.format(tool_name, board))
failed.append(board)
else:
self._logger.LogOutput(
'Successfully built {0} for board {1}.'.format(tool_name, board))
finally:
# Make sure we un-mount ndk-root before we leave here, regardless of the
# build result of the tool. Otherwise we may inadvertently delete ndk-root
# dir, which is not part of the chroot and could be disastrous.
if chroot_ndk_root:
if self._ce.RunCommand('sudo umount {}'.format(chroot_ndk_root)):
self._logger.LogWarning(
('Failed to umount "{}", please check '
'before deleting chroot.').format(chroot_ndk_root))
# Clean up soft links created during build.
self._ce.RunCommand('cd {}/toolchain/{} && git clean -df'.format(
self._ndk_dir, tool_name))
if failed:
self._logger.LogError(
'Failed to build {0} for the following board(s): "{1}"'.format(
tool_name, ' '.join(failed)))
return False
# All boards build successfully
return True
def DoBootstrapping(self):
"""Do bootstrapping the chroot.
    This step first downloads a pristine sdk, then uses that sdk to build the
    new sdk, and finally uses the new sdk to build every host package.
Returns:
True if operation succeeds.
"""
logfile = os.path.join(self._chromeos_root, 'bootstrap.log')
command = 'cd "{0}" && cros_sdk --delete --bootstrap |& tee "{1}"'.format(
self._chromeos_root, logfile)
rv = self._ce.RunCommand(command, print_to_console=True)
if rv:
self._logger.LogError(
'Bootstrapping failed, log file - "{0}"\n'.format(logfile))
return False
self._logger.LogOutput('Bootstrap succeeded.')
return True
def BuildAndInstallAmd64Host(self):
"""Build amd64-host (host) packages.
Build all host packages in the newly-bootstrapped 'chroot' using *NEW*
toolchain.
So actually we perform 2 builds of all host packages -
1. build new toolchain using old toolchain and build all host packages
using the newly built toolchain
2. build the new toolchain again but using new toolchain built in step 1,
and build all host packages using the newly built toolchain
Returns:
True if operation succeeds.
"""
cmd = ('cd {0} && cros_sdk -- -- ./setup_board --board=amd64-host '
'--accept_licenses=@CHROMEOS --skip_chroot_upgrade --nousepkg '
'--reuse_pkgs_from_local_boards').format(self._chromeos_root)
rv = self._ce.RunCommand(cmd, print_to_console=True)
if rv:
self._logger.LogError('Build amd64-host failed.')
return False
# Package amd64-host into 'built-sdk.tar.xz'.
sdk_package = os.path.join(self._chromeos_root, 'built-sdk.tar.xz')
cmd = ('cd {0}/chroot/build/amd64-host && sudo XZ_OPT="-e9" '
'tar --exclude="usr/lib/debug/*" --exclude="packages/*" '
'--exclude="tmp/*" --exclude="usr/local/build/autotest/*" '
'--sparse -I xz -vcf {1} . && sudo chmod a+r {1}').format(
self._chromeos_root, sdk_package)
rv = self._ce.RunCommand(cmd, print_to_console=True)
if rv:
self._logger.LogError('Failed to create "built-sdk.tar.xz".')
return False
# Install amd64-host into a new chroot.
cmd = ('cd {0} && cros_sdk --chroot new-sdk-chroot --download --replace '
'--nousepkg --url file://{1}').format(self._chromeos_root,
sdk_package)
rv = self._ce.RunCommand(cmd, print_to_console=True)
if rv:
self._logger.LogError('Failed to install "built-sdk.tar.xz".')
return False
self._logger.LogOutput(
'Successfully installed built-sdk.tar.xz into a new chroot.\nAll done.')
# Rename the newly created new-sdk-chroot to chroot.
cmd = ('cd {0} && sudo mv chroot chroot-old && '
'sudo mv new-sdk-chroot chroot').format(self._chromeos_root)
rv = self._ce.RunCommand(cmd, print_to_console=True)
return rv == 0
def Do(self):
"""Entrance of the class.
Returns:
True if everything is ok.
"""
if (self.SubmitToLocalBranch() and self.CheckoutBranch() and
self.FindEbuildFile() and self.InplaceModifyEbuildFile()):
if self._setup_tool_ebuild_file_only:
# Everything is done, we are good.
ret = True
else:
if self._board:
ret = self.DoBuildForBoard()
else:
# This implies '--bootstrap'.
ret = (self.DoBootstrapping() and (self._disable_2nd_bootstrap or
self.BuildAndInstallAmd64Host()))
else:
ret = False
return ret
def Main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-c',
'--chromeos_root',
dest='chromeos_root',
help=('Optional. ChromeOs root dir. '
'When not specified, chromeos root will be deduced'
' from current working directory.'))
parser.add_argument(
'--ndk_dir',
dest='ndk_dir',
help=('Topmost android ndk dir, required. '
'Do not need to include the "toolchain/*" part.'))
parser.add_argument(
'--gcc_branch',
dest='gcc_branch',
help=('The branch to test against. '
'This branch must be a local branch '
'inside "src/third_party/gcc". '
'Notice, this must not be used with "--gcc_dir".'))
parser.add_argument(
'--binutils_branch',
dest='binutils_branch',
help=('The branch to test against binutils. '
'This branch must be a local branch '
'inside "src/third_party/binutils". '
'Notice, this must not be used with '
'"--binutils_dir".'))
parser.add_argument(
'-g',
'--gcc_dir',
dest='gcc_dir',
help=('Use a local gcc tree to do bootstrapping. '
'Notice, this must not be used with '
'"--gcc_branch".'))
parser.add_argument(
'--binutils_dir',
dest='binutils_dir',
help=('Use a local binutils tree to do bootstrapping. '
'Notice, this must not be used with '
'"--binutils_branch".'))
parser.add_argument(
'--fixperm',
dest='fixperm',
default=False,
action='store_true',
help=('Fix the (notorious) permission error '
'while trying to bootstrap the chroot. '
'Note this takes an extra 10-15 minutes '
'and is only needed once per chromiumos tree.'))
parser.add_argument(
'--setup_tool_ebuild_file_only',
dest='setup_tool_ebuild_file_only',
default=False,
action='store_true',
help=('Setup gcc and/or binutils ebuild file '
'to pick up the branch (--gcc/binutils_branch) or '
'use gcc and/or binutils source '
'(--gcc/binutils_dir) and exit. Keep chroot as is.'
' This should not be used with '
'--gcc/binutils_dir/branch options.'))
parser.add_argument(
'--reset_tool_ebuild_file',
dest='reset_tool_ebuild_file',
default=False,
action='store_true',
help=('Reset the modification that is done by this '
'script. Note, when this script is running, it '
'will modify the active gcc/binutils ebuild file. '
'Use this option to reset (what this script has '
'done) and exit. This should not be used with -- '
'gcc/binutils_dir/branch options.'))
parser.add_argument(
'--board',
dest='board',
default=None,
help=('Only build toolchain for specific board(s). '
'Use "host" to build for host. '
'Use "," to seperate multiple boards. '
'This does not perform a chroot bootstrap.'))
parser.add_argument(
'--bootstrap',
dest='bootstrap',
default=False,
action='store_true',
help=('Performs a chroot bootstrap. '
'Note, this will *destroy* your current chroot.'))
parser.add_argument(
'--disable-2nd-bootstrap',
dest='disable_2nd_bootstrap',
default=False,
action='store_true',
help=('Disable a second bootstrap '
'(build of amd64-host stage).'))
options = parser.parse_args(argv)
# Trying to deduce chromeos root from current directory.
if not options.chromeos_root:
logger.GetLogger().LogOutput('Trying to deduce chromeos root ...')
wdir = os.getcwd()
while wdir and wdir != '/':
if misc.IsChromeOsTree(wdir):
logger.GetLogger().LogOutput('Find chromeos_root: {}'.format(wdir))
options.chromeos_root = wdir
break
wdir = os.path.dirname(wdir)
if not options.chromeos_root:
parser.error('Missing or failing to deduce mandatory option "--chromeos".')
return 1
options.chromeos_root = os.path.abspath(
os.path.expanduser(options.chromeos_root))
if not os.path.isdir(options.chromeos_root):
logger.GetLogger().LogError(
'"{0}" does not exist.'.format(options.chromeos_root))
return 1
options.ndk_dir = os.path.expanduser(options.ndk_dir)
if not options.ndk_dir:
parser.error('Missing mandatory option "--ndk_dir".')
return 1
# Some tolerance regarding user input. We only need the ndk_root part, do not
# include toolchain/(gcc|binutils)/ part in this option.
options.ndk_dir = re.sub('/toolchain(/gcc|/binutils)?/?$', '',
options.ndk_dir)
if not (os.path.isdir(options.ndk_dir) and
os.path.isdir(os.path.join(options.ndk_dir, 'toolchain'))):
logger.GetLogger().LogError(
'"toolchain" directory not found under "{0}".'.format(options.ndk_dir))
return 1
if options.fixperm:
# Fix perm error before continuing.
cmd = (r'sudo find "{0}" \( -name ".cache" -type d -prune \) -o '
r'\( -name "chroot" -type d -prune \) -o '
r'\( -type f -exec chmod a+r {{}} \; \) -o '
r'\( -type d -exec chmod a+rx {{}} \; \)'
).format(options.chromeos_root)
logger.GetLogger().LogOutput(
'Fixing perm issues for chromeos root, this might take some time.')
command_executer.GetCommandExecuter().RunCommand(cmd)
if options.reset_tool_ebuild_file:
if (options.gcc_dir or options.gcc_branch or options.binutils_dir or
options.binutils_branch):
logger.GetLogger().LogWarning(
'Ignoring any "--gcc/binutils_dir" and/or "--gcc/binutils_branch".')
if options.setup_tool_ebuild_file_only:
logger.GetLogger().LogError(
('Conflict options "--reset_tool_ebuild_file" '
'and "--setup_tool_ebuild_file_only".'))
return 1
rv = Bootstrapper.ResetToolEbuildFile(options.chromeos_root, 'gcc')
rv1 = Bootstrapper.ResetToolEbuildFile(options.chromeos_root, 'binutils')
return 0 if (rv and rv1) else 1
if options.gcc_dir:
options.gcc_dir = os.path.abspath(os.path.expanduser(options.gcc_dir))
if not os.path.isdir(options.gcc_dir):
logger.GetLogger().LogError(
'"{0}" does not exist.'.format(options.gcc_dir))
return 1
if options.gcc_branch and options.gcc_dir:
parser.error('Only one of "--gcc_dir" and "--gcc_branch" can be specified.')
return 1
if options.binutils_dir:
options.binutils_dir = os.path.abspath(
os.path.expanduser(options.binutils_dir))
if not os.path.isdir(options.binutils_dir):
logger.GetLogger().LogError(
'"{0}" does not exist.'.format(options.binutils_dir))
return 1
if options.binutils_branch and options.binutils_dir:
parser.error('Only one of "--binutils_dir" and '
'"--binutils_branch" can be specified.')
return 1
if (not (options.binutils_branch or options.binutils_dir or
options.gcc_branch or options.gcc_dir)):
parser.error(('At least one of "--gcc_dir", "--gcc_branch", '
'"--binutils_dir" and "--binutils_branch" must '
'be specified.'))
return 1
if not options.board and not options.bootstrap:
parser.error('You must specify either "--board" or "--bootstrap".')
return 1
if (options.board and options.bootstrap and
not options.setup_tool_ebuild_file_only):
parser.error('You must specify only one of "--board" and "--bootstrap".')
return 1
if not options.bootstrap and options.disable_2nd_bootstrap:
parser.error('"--disable-2nd-bootstrap" has no effect '
'without specifying "--bootstrap".')
return 1
if Bootstrapper(
options.chromeos_root,
options.ndk_dir,
gcc_branch=options.gcc_branch,
gcc_dir=options.gcc_dir,
binutils_branch=options.binutils_branch,
binutils_dir=options.binutils_dir,
board=options.board,
disable_2nd_bootstrap=options.disable_2nd_bootstrap,
setup_tool_ebuild_file_only=options.setup_tool_ebuild_file_only).Do():
return 0
return 1
if __name__ == '__main__':
retval = Main(sys.argv[1:])
sys.exit(retval)
| [
"[email protected]"
] | |
529b9a96885880408cf9b4499a0d6e5912c55327 | 87e88e72991cc83eff200fb87cc3985d383e3292 | /FPV_ANN/utils/resBlock.py | d64ec3238350a96644f8811f0ab578ff8330b10e | [
"MIT"
] | permissive | ppbreda/combustionML | c04f3418d891a91af07c4522c507fef186743f1e | b441907276c39f127b8914b78656581b0bcde359 | refs/heads/master | 2020-04-29T18:05:47.425570 | 2019-03-15T08:40:36 | 2019-03-15T08:40:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | from keras.models import Model
from keras.layers import Dense, Activation, Input, BatchNormalization, Dropout, concatenate
from keras import layers
def res_block_org(input_tensor, n_neuron, stage, block, bn=False):
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Dense(n_neuron, name=conv_name_base + '2a')(input_tensor)
if bn:
x = BatchNormalization(axis=-1, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Dropout(0.)(x)
x = Dense(n_neuron, name=conv_name_base + '2b')(x)
if bn:
x = BatchNormalization(axis=-1, name=bn_name_base + '2b')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
x = Dropout(0.)(x)
return x
def res_branch(bi, conv_name_base, bn_name_base, scale, input_tensor, n_neuron, stage, block, dp1, bn=False):
x_1 = Dense(scale * n_neuron, name=conv_name_base + '2a_' + str(bi))(input_tensor)
if bn:
x_1 = BatchNormalization(axis=-1, name=bn_name_base + '2a_' + str(bi))(x_1)
x_1 = Activation('relu')(x_1)
if dp1 > 0:
x_1 = Dropout(dp1)(x_1)
return x_1
# new resnet block implementation with a bottleneck
def res_block(input_tensor, scale, n_neuron, stage, block, bn=False, branches=0):
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
# scale = 2
x = Dense(scale * n_neuron, name=conv_name_base + '2a')(input_tensor)
if bn:
x = BatchNormalization(axis=-1, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
dp1 = 0
if dp1 > 0:
x = Dropout(dp1)(x)
branch_list = [x]
for i in range(branches - 1):
branch_list.append(
res_branch(i, conv_name_base, bn_name_base, scale, input_tensor, n_neuron, stage, block, dp1, bn))
if branches - 1 > 0:
x = Dense(n_neuron, name=conv_name_base + '2b')(concatenate(branch_list, axis=-1))
# x = Dense(n_neuron, name=conv_name_base + '2b')(layers.add(branch_list))
else:
x = Dense(n_neuron, name=conv_name_base + '2b')(x)
if bn:
x = BatchNormalization(axis=-1, name=bn_name_base + '2b')(x)
x = layers.add([x, input_tensor])
x = Activation('relu')(x)
if dp1 > 0:
x = Dropout(dp1)(x)
return x
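# Usage sketch (an illustrative addition, not from this repo): stack a few
# residual blocks between an input projection and a regression head. The
# widths, depth and branch count below are assumptions picked for the example.
def build_resnet(input_dim, output_dim, n_neuron=64, n_blocks=3, scale=2, branches=2):
    inputs = Input(shape=(input_dim,))
    # project into the block width so the residual additions line up
    x = Dense(n_neuron, activation='relu')(inputs)
    for i in range(n_blocks):
        # a distinct stage per block keeps the generated layer names unique
        x = res_block(x, scale, n_neuron, stage=i + 1, block='a',
                      bn=True, branches=branches)
    outputs = Dense(output_dim, activation='linear')(x)
    return Model(inputs=inputs, outputs=outputs)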
| [
"[email protected]"
] | |
90e6ad0aa33ddfa5468bf5f1af143002cab456a2 | cc1d44cf04e5b2b15bb296a434aad4ae4bcfc4be | /python3/euc_dist.py | e6536e74111caf0ec86e96ef6f4469cb1453d3bf | [] | no_license | ericosur/ericosur-snippet | dda2200546b13fb9b84632d115a0f4ca5e3d5c47 | 0309eeb614612f9a35843e2f45f4080ae03eaa81 | refs/heads/main | 2023-08-08T04:54:05.907435 | 2023-07-25T06:04:01 | 2023-07-25T06:04:01 | 23,057,196 | 2 | 1 | null | 2022-08-31T09:55:19 | 2014-08-18T03:18:52 | Perl | UTF-8 | Python | false | false | 217 | py | #!/usr/bin/python3
# coding: utf-8
''' euclidean distance '''
import numpy as np
def main():
''' main '''
m = np.array([3, 4])
d = np.linalg.norm(m)
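    # (3, 4) is the classic 3-4-5 right triangle, so this prints 5.0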
print(d)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
f4d95a2a12b33964de49a3d66afb8e7ab64907f9 | 1b42b04a27c33bfa704f4f108c9986cd558d7545 | /external_libs/pyzmq-14.5.0/python3/ucs4/32bit/zmq/green/core.py | b3b8b89819bff69a02d830fe6da2dcd4026e6008 | [] | no_license | Nandan307/trex_stl_lib | addbe9e42cc69a84fba59b385b108f4f22165534 | 8b3a204308475bff79a6bb7dd1419bbf18c10ffd | refs/heads/master | 2021-01-25T00:33:17.552192 | 2018-02-28T14:41:56 | 2018-02-28T14:41:56 | 123,301,115 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 10,477 | py | #-----------------------------------------------------------------------------
# Copyright (C) 2011-2012 Travis Cline
#
# This file is part of pyzmq
# It is adapted from upstream project zeromq_gevent under the New BSD License
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
"""This module wraps the :class:`Socket` and :class:`Context` found in :mod:`pyzmq <zmq>` to be non blocking
"""
import sys
import time
import warnings
import zmq
from zmq import Context as _original_Context
from zmq import Socket as _original_Socket
from .poll import _Poller
import gevent
from gevent.event import AsyncResult
from gevent.hub import get_hub
if hasattr(zmq, 'RCVTIMEO'):
TIMEOS = (zmq.RCVTIMEO, zmq.SNDTIMEO)
else:
TIMEOS = ()
def _stop(evt):
"""simple wrapper for stopping an Event, allowing for method rename in gevent 1.0"""
try:
evt.stop()
except AttributeError as e:
# gevent<1.0 compat
evt.cancel()
class _Socket(_original_Socket):
"""Green version of :class:`zmq.Socket`
The following methods are overridden:
* send
* recv
To ensure that the ``zmq.NOBLOCK`` flag is set and that sending or receiving
is deferred to the hub if a ``zmq.EAGAIN`` (retry) error is raised.
The `__state_changed` method is triggered when the zmq.FD for the socket is
marked as readable and triggers the necessary read and write events (which
are waited for in the recv and send methods).
Some double underscore prefixes are used to minimize pollution of
:class:`zmq.Socket`'s namespace.
"""
__in_send_multipart = False
__in_recv_multipart = False
__writable = None
__readable = None
_state_event = None
_gevent_bug_timeout = 11.6 # timeout for not trusting gevent
_debug_gevent = False # turn on if you think gevent is missing events
_poller_class = _Poller
def __init__(self, context, socket_type):
_original_Socket.__init__(self, context, socket_type)
self.__in_send_multipart = False
self.__in_recv_multipart = False
self.__setup_events()
def __del__(self):
self.close()
def close(self, linger=None):
super(_Socket, self).close(linger)
self.__cleanup_events()
def __cleanup_events(self):
# close the _state_event event, keeps the number of active file descriptors down
if getattr(self, '_state_event', None):
_stop(self._state_event)
self._state_event = None
# if the socket has entered a close state resume any waiting greenlets
self.__writable.set()
self.__readable.set()
def __setup_events(self):
self.__readable = AsyncResult()
self.__writable = AsyncResult()
self.__readable.set()
self.__writable.set()
try:
self._state_event = get_hub().loop.io(self.getsockopt(zmq.FD), 1) # read state watcher
self._state_event.start(self.__state_changed)
except AttributeError:
# for gevent<1.0 compatibility
from gevent.core import read_event
self._state_event = read_event(self.getsockopt(zmq.FD), self.__state_changed, persist=True)
def __state_changed(self, event=None, _evtype=None):
if self.closed:
self.__cleanup_events()
return
try:
# avoid triggering __state_changed from inside __state_changed
events = super(_Socket, self).getsockopt(zmq.EVENTS)
except zmq.ZMQError as exc:
self.__writable.set_exception(exc)
self.__readable.set_exception(exc)
else:
if events & zmq.POLLOUT:
self.__writable.set()
if events & zmq.POLLIN:
self.__readable.set()
def _wait_write(self):
assert self.__writable.ready(), "Only one greenlet can be waiting on this event"
self.__writable = AsyncResult()
# timeout is because libzmq cannot be trusted to properly signal a new send event:
        # this is effectively a maximum poll interval of _gevent_bug_timeout seconds
tic = time.time()
dt = self._gevent_bug_timeout
if dt:
timeout = gevent.Timeout(seconds=dt)
else:
timeout = None
try:
if timeout:
timeout.start()
self.__writable.get(block=True)
except gevent.Timeout as t:
if t is not timeout:
raise
toc = time.time()
# gevent bug: get can raise timeout even on clean return
# don't display zmq bug warning for gevent bug (this is getting ridiculous)
if self._debug_gevent and timeout and toc-tic > dt and \
self.getsockopt(zmq.EVENTS) & zmq.POLLOUT:
print("BUG: gevent may have missed a libzmq send event on %i!" % self.FD, file=sys.stderr)
finally:
if timeout:
timeout.cancel()
self.__writable.set()
def _wait_read(self):
assert self.__readable.ready(), "Only one greenlet can be waiting on this event"
self.__readable = AsyncResult()
# timeout is because libzmq cannot always be trusted to play nice with libevent.
# I can only confirm that this actually happens for send, but lets be symmetrical
# with our dirty hacks.
        # this is effectively a maximum poll interval of _gevent_bug_timeout seconds
tic = time.time()
dt = self._gevent_bug_timeout
if dt:
timeout = gevent.Timeout(seconds=dt)
else:
timeout = None
try:
if timeout:
timeout.start()
self.__readable.get(block=True)
except gevent.Timeout as t:
if t is not timeout:
raise
toc = time.time()
# gevent bug: get can raise timeout even on clean return
# don't display zmq bug warning for gevent bug (this is getting ridiculous)
if self._debug_gevent and timeout and toc-tic > dt and \
self.getsockopt(zmq.EVENTS) & zmq.POLLIN:
print("BUG: gevent may have missed a libzmq recv event on %i!" % self.FD, file=sys.stderr)
finally:
if timeout:
timeout.cancel()
self.__readable.set()
def send(self, data, flags=0, copy=True, track=False):
"""send, which will only block current greenlet
state_changed always fires exactly once (success or fail) at the
end of this method.
"""
# if we're given the NOBLOCK flag act as normal and let the EAGAIN get raised
if flags & zmq.NOBLOCK:
try:
msg = super(_Socket, self).send(data, flags, copy, track)
finally:
if not self.__in_send_multipart:
self.__state_changed()
return msg
# ensure the zmq.NOBLOCK flag is part of flags
flags |= zmq.NOBLOCK
while True: # Attempt to complete this operation indefinitely, blocking the current greenlet
try:
# attempt the actual call
msg = super(_Socket, self).send(data, flags, copy, track)
except zmq.ZMQError as e:
# if the raised ZMQError is not EAGAIN, reraise
if e.errno != zmq.EAGAIN:
if not self.__in_send_multipart:
self.__state_changed()
raise
else:
if not self.__in_send_multipart:
self.__state_changed()
return msg
# defer to the event loop until we're notified the socket is writable
self._wait_write()
def recv(self, flags=0, copy=True, track=False):
"""recv, which will only block current greenlet
state_changed always fires exactly once (success or fail) at the
end of this method.
"""
if flags & zmq.NOBLOCK:
try:
msg = super(_Socket, self).recv(flags, copy, track)
finally:
if not self.__in_recv_multipart:
self.__state_changed()
return msg
flags |= zmq.NOBLOCK
while True:
try:
msg = super(_Socket, self).recv(flags, copy, track)
except zmq.ZMQError as e:
if e.errno != zmq.EAGAIN:
if not self.__in_recv_multipart:
self.__state_changed()
raise
else:
if not self.__in_recv_multipart:
self.__state_changed()
return msg
self._wait_read()
def send_multipart(self, *args, **kwargs):
"""wrap send_multipart to prevent state_changed on each partial send"""
self.__in_send_multipart = True
try:
msg = super(_Socket, self).send_multipart(*args, **kwargs)
finally:
self.__in_send_multipart = False
self.__state_changed()
return msg
def recv_multipart(self, *args, **kwargs):
"""wrap recv_multipart to prevent state_changed on each partial recv"""
self.__in_recv_multipart = True
try:
msg = super(_Socket, self).recv_multipart(*args, **kwargs)
finally:
self.__in_recv_multipart = False
self.__state_changed()
return msg
def get(self, opt):
"""trigger state_changed on getsockopt(EVENTS)"""
if opt in TIMEOS:
warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
optval = super(_Socket, self).get(opt)
if opt == zmq.EVENTS:
self.__state_changed()
return optval
def set(self, opt, val):
"""set socket option"""
if opt in TIMEOS:
warnings.warn("TIMEO socket options have no effect in zmq.green", UserWarning)
return super(_Socket, self).set(opt, val)
class _Context(_original_Context):
"""Replacement for :class:`zmq.Context`
Ensures that the greened Socket above is used in calls to `socket`.
"""
_socket_class = _Socket
| [
"[email protected]"
] | |
b714c4900618c7eaaade3b45b921035c9bc2929a | 222dbb2f43dccbd4538ef76798a26457edffe07c | /MFVI/distributions.py | 805669cb27a08efcecf3874559184aaabb9b9751 | [] | no_license | MJHutchinson/PytorchBayes | 9699351822416deeb61e95a34653580fdfbbb5ae | e95a9bd308c595b9603bdfb799288a0ed50cc7c6 | refs/heads/master | 2020-04-09T18:39:57.643468 | 2019-01-15T16:06:05 | 2019-01-15T16:06:05 | 160,519,698 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,433 | py | import torch
import numpy as np
log_2pi = np.log(2*np.pi)
class Distribution(object):
def pdf(self, x):
raise NotImplementedError
def logpdf(self, x):
raise NotImplementedError
def cdf(self, x):
raise NotImplementedError
def logcdf(self, x):
raise NotImplementedError
def sample(self):
raise NotImplementedError
def forward(self, x):
raise NotImplementedError
class Normal(Distribution):
def __init__(self, mu, logvar):
self.mu = mu
self.logvar = logvar
self.shape = mu.size()
def logpdf(self, x):
return -0.5 * log_2pi \
- 0.5 * self.logvar \
- (x - self.mu).pow(2) / (2 * torch.exp(self.logvar))
def pdf(self, x):
return torch.exp(self.logpdf(x))
def sample(self):
if self.mu.is_cuda:
eps = torch.cuda.FloatTensor(self.shape).normal_()
else:
eps = torch.FloatTensor(self.shape).normal_()
return self.mu + torch.exp(0.5 * self.logvar) * eps
def kl(self, distribution):
if isinstance(distribution, self.__class__):
const_term = -0.5
log_var_diff = 0.5 * (-self.logvar + distribution.logvar)
mu_diff_term = 0.5 * ((self.mu - distribution.mu) ** 2 + torch.exp(self.logvar))/torch.exp(distribution.logvar)
return const_term + log_var_diff + mu_diff_term
| [
"[email protected]"
] | |
8ec1b7d766ec47742ccc9d987470864ab0da2c88 | 33338ccfe04112e7b7ea09aea240187cee5bab3f | /examples/motion.py | 35867e74d44317195e2ecb4b470fc58a09d5d516 | [
"MIT"
] | permissive | levkovigor/pmw3901-python-pimoroni | b944e7e3f1b4b43fcd0259cbdd4d6548148fbacb | bf798f6d58a1527563822d152d2650cc72e06e09 | refs/heads/master | 2023-08-02T22:53:24.578868 | 2021-09-16T15:05:11 | 2021-09-16T15:05:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,366 | py | #!/usr/bin/env python
import time
import argparse
from pmw3901 import PMW3901, PAA5100, BG_CS_FRONT_BCM, BG_CS_BACK_BCM
print("""motion.py - Detect flow/motion in front of the PMW3901 sensor.
Press Ctrl+C to exit!
""")
parser = argparse.ArgumentParser()
parser.add_argument('--board', type=str,
choices=['pmw3901', 'paa5100'],
required=True,
help='Breakout type.')
parser.add_argument('--rotation', type=int,
default=0, choices=[0, 90, 180, 270],
help='Rotation of sensor in degrees.')
parser.add_argument('--spi-slot', type=str,
default='front', choices=['front', 'back'],
help='Breakout Garden SPI slot.')
args = parser.parse_args()
# Pick the right class for the specified breakout
SensorClass = PMW3901 if args.board == 'pmw3901' else PAA5100
flo = SensorClass(spi_port=0, spi_cs=1, spi_cs_gpio=BG_CS_FRONT_BCM if args.spi_slot == 'front' else BG_CS_BACK_BCM)
flo.set_rotation(args.rotation)
tx = 0
ty = 0
try:
while True:
try:
x, y = flo.get_motion()
except RuntimeError:
continue
tx += x
ty += y
print("Relative: x {:03d} y {:03d} | Absolute: x {:03d} y {:03d}".format(x, y, tx, ty))
time.sleep(0.01)
except KeyboardInterrupt:
pass
| [
"[email protected]"
] | |
dfbd20650f4b6a5afeb3dfbebec177b237cde843 | 2682d6f8d8e5740b0dee8fe7fbf939ff78dc1b3b | /tests/unit/test_parser.py | c164306fa7e3333b77e1faa5e7920d0d1bef684f | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | skornev/apacheconfig | c5bb443538c2b03cf9069962595487460ac20568 | 7126dbc0a547779276ac04cf32a051c26781c464 | refs/heads/master | 2020-06-10T02:04:56.042451 | 2018-12-29T21:21:22 | 2018-12-29T21:21:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,430 | py | #
# This file is part of apacheconfig software.
#
# Copyright (c) 2018-2019, Ilya Etingof <[email protected]>
# License: https://github.com/etingof/apacheconfig/LICENSE.rst
#
import sys
from apacheconfig import *
try:
import unittest2 as unittest
except ImportError:
import unittest
class ParserTestCase(unittest.TestCase):
def testOptionAndValue(self):
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='statement')
ast = parser.parse('a b\n')
self.assertEqual(ast, ['statement', 'a', 'b'])
ast = parser.parse('a=b\n')
self.assertEqual(ast, ['statement', 'a', 'b'])
ast = parser.parse('a "b c"\n')
self.assertEqual(ast, ['statement', 'a', 'b c'])
def testHashComments(self):
text = """\
#a
# b
c c# c
c \# # c
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents',
['comment', 'a'],
['comment', ' b'],
['statements', ['statement', 'c', 'c']],
['comment', ' c'],
['statements', ['statement', 'c', '# # c']]])
def testCStyleComments(self):
text = """\
/*a*/
/*
# b
*/
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents', ['comment', 'a'], ['comment', '\n# b\n']])
def testCStyleCommentsDisabled(self):
text = """\
/*a*/
/*
# b
*/
"""
options = {
'ccomments': False
}
ApacheConfigLexer = make_lexer(**options)
ApacheConfigParser = make_parser(**options)
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
self.assertRaises(ApacheConfigError, parser.parse, text)
def testIncludes(self):
text = """\
include first.conf
<<include second.conf>>
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents', ['include', 'first.conf'], ['include', 'second.conf']])
def testApacheIncludesDisabled(self):
text = """\
include first.conf
<<include second.conf>>
"""
options = {
'useapacheincludes': False
}
ApacheConfigLexer = make_lexer(**options)
ApacheConfigParser = make_parser(**options)
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents', ['include', 'first.conf'], ['include', 'second.conf']])
def testOptionAndValueSet(self):
text = """\
a b
a = b
a b
a= b
a =b
a b
a "b"
a = "b"
a = 'b'
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='statements')
ast = parser.parse(text)
self.assertEqual(ast, ['statements',
['statement', 'a', 'b'],
['statement', 'a', 'b'],
['statement', 'a', 'b'],
['statement', 'a', 'b'],
['statement', 'a', 'b'],
['statement', 'a', 'b'],
['statement', 'a', 'b'],
['statement', 'a', 'b'],
['statement', 'a', 'b']])
def testBlockWithOptions(self):
text = """\
<a>
#a
a = "b b"
# a b
a = "b b"
</a>
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='block')
ast = parser.parse(text)
self.assertEqual(ast, ['block', 'a',
['contents',
['comment', 'a'],
['statements',
['statement', 'a', 'b b']],
['comment', ' a b'],
['statements',
['statement', 'a', 'b b']]], 'a'])
def testNestedBlock(self):
text = """\
<a>
<b>
<c>
</c>
</b>
</a>
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='block')
ast = parser.parse(text)
self.assertEqual(ast, ['block', 'a',
['contents',
['block', 'b',
['contents',
['block', 'c', [], 'c']], 'b']], 'a'])
def testEmptyBlocks(self):
text = """\
<a/>
<b/>
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents',
['block', 'a', [], 'a'],
['block', 'b', [], 'b']])
def testNamedEmptyBlocks(self):
text = """\
<a A/>
<b B />
</b B />
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents',
['block', 'a A', [], 'a A'],
['block', 'b B /', [], 'b B /']])
def testLowerCaseNames(self):
text = """\
<A/>
<aA>
Bb Cc
</aA>
"""
options = {
'lowercasenames': True
}
ApacheConfigLexer = make_lexer(**options)
ApacheConfigParser = make_parser(**options)
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents',
['block', 'a', [], 'a'],
['block', 'aa',
['contents',
['statements', ['statement', 'bb', 'Cc']]],
'aa']])
def testNoStripValues(self):
text = """\
<aA>
Bb Cc \
</aA>
"""
options = {
'nostripvalues': True
}
ApacheConfigLexer = make_lexer(**options)
ApacheConfigParser = make_parser(**options)
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents',
['block', 'aA',
['contents',
['statements', ['statement', 'Bb', 'Cc ']]],
'aA']])
def testHereDoc(self):
text = """\
<main>
PYTHON <<MYPYTHON
def a():
x = y
return
MYPYTHON
</main>
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='contents')
ast = parser.parse(text)
self.assertEqual(ast, ['contents',
['block', 'main',
['contents',
['statements',
['statement',
'PYTHON', ' def a():\n x = y\n return'
]
]
],
'main']])
def testWholeConfig(self):
text = """\
# a
a = b
<a>
a = b
</a>
a b
<a a>
a b \
c = d
# c
</a a>
# a
"""
ApacheConfigLexer = make_lexer()
ApacheConfigParser = make_parser()
parser = ApacheConfigParser(ApacheConfigLexer(), start='config')
ast = parser.parse(text)
self.assertEqual(ast, [
'config',
['contents',
['comment', ' a'],
['statements', ['statement', 'a', 'b']],
['block', 'a',
['contents', ['statements', ['statement', 'a', 'b']]],
'a'],
['statements', ['statement', 'a', 'b']],
['block', 'a a',
['contents',
['statements',
['statement', 'a', 'b'],
['statement', 'c', 'd']],
['comment', ' c']],
'a a'],
['comment', ' a']]
])
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite)
| [
"[email protected]"
] | |
2c5b6c337f6de90230dea0f80085dee7f5c7bcbe | c8b541ea4fa7d159b80bef116e5cd232ac61b8c1 | /venv/Lib/test/test_importlib/test_abc.py | 56a14ad90c71993bff5f3474c9d38130fb76bf32 | [] | no_license | shengmenghui/knowledge_building | 7a2d8eef040c2d3a45726b3a908be301e922024b | 04fd7784f15535efed917cce44856526f1f0ce48 | refs/heads/master | 2022-12-31T14:18:05.282092 | 2020-10-23T02:51:37 | 2020-10-23T02:51:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,243 | py | import contextlib
import inspect
import io
import marshal
import os
import sys
from test import support
import types
import unittest
from unittest import mock
import warnings
from . import util as test_util
init = test_util.import_importlib('importlib')
abc = test_util.import_importlib('importlib.abc')
machinery = test_util.import_importlib('importlib.machinery')
util = test_util.import_importlib('importlib.util')
##### Inheritance ##############################################################
class InheritanceTests:
"""Test that the specified class is a subclass/superclass of the expected
classes."""
subclasses = []
superclasses = []
def setUp(self):
self.superclasses = [getattr(self.abc, class_name)
for class_name in self.superclass_names]
if hasattr(self, 'subclass_names'):
# Because test.support.import_fresh_module() creates a new
# importlib._bootstrap per module, inheritance checks fail when
# checking across module boundaries (i.e. the _bootstrap in abc is
# not the same as the one in machinery). That means stealing one of
# the modules from the other to make sure the same instance is used.
machinery = self.abc.machinery
self.subclasses = [getattr(machinery, class_name)
for class_name in self.subclass_names]
assert self.subclasses or self.superclasses, self.__class__
self.__test = getattr(self.abc, self._NAME)
def test_subclasses(self):
# Test that the expected subclasses inherit.
for subclass in self.subclasses:
self.assertTrue(issubclass(subclass, self.__test),
"{0} is not a subclass of {1}".format(subclass, self.__test))
def test_superclasses(self):
# Test that the class inherits from the expected superclasses.
for superclass in self.superclasses:
self.assertTrue(issubclass(self.__test, superclass),
"{0} is not a superclass of {1}".format(superclass, self.__test))
class MetaPathFinder(InheritanceTests):
superclass_names = ['Finder']
subclass_names = ['BuiltinImporter', 'FrozenImporter', 'PathFinder',
'WindowsRegistryFinder']
(Frozen_MetaPathFinderInheritanceTests,
Source_MetaPathFinderInheritanceTests
) = test_util.test_both(MetaPathFinder, abc=abc)
class PathEntryFinder(InheritanceTests):
superclass_names = ['Finder']
subclass_names = ['FileFinder']
(Frozen_PathEntryFinderInheritanceTests,
Source_PathEntryFinderInheritanceTests
) = test_util.test_both(PathEntryFinder, abc=abc)
class ResourceLoader(InheritanceTests):
superclass_names = ['Loader']
(Frozen_ResourceLoaderInheritanceTests,
Source_ResourceLoaderInheritanceTests
) = test_util.test_both(ResourceLoader, abc=abc)
class InspectLoader(InheritanceTests):
superclass_names = ['Loader']
subclass_names = ['BuiltinImporter', 'FrozenImporter', 'ExtensionFileLoader']
(Frozen_InspectLoaderInheritanceTests,
Source_InspectLoaderInheritanceTests
) = test_util.test_both(InspectLoader, abc=abc)
class ExecutionLoader(InheritanceTests):
superclass_names = ['InspectLoader']
subclass_names = ['ExtensionFileLoader']
(Frozen_ExecutionLoaderInheritanceTests,
Source_ExecutionLoaderInheritanceTests
) = test_util.test_both(ExecutionLoader, abc=abc)
class FileLoader(InheritanceTests):
superclass_names = ['ResourceLoader', 'ExecutionLoader']
subclass_names = ['SourceFileLoader', 'SourcelessFileLoader']
(Frozen_FileLoaderInheritanceTests,
Source_FileLoaderInheritanceTests
) = test_util.test_both(FileLoader, abc=abc)
class SourceLoader(InheritanceTests):
superclass_names = ['ResourceLoader', 'ExecutionLoader']
subclass_names = ['SourceFileLoader']
(Frozen_SourceLoaderInheritanceTests,
Source_SourceLoaderInheritanceTests
) = test_util.test_both(SourceLoader, abc=abc)
##### Default return values ####################################################
def make_abc_subclasses(base_class, name=None, inst=False, **kwargs):
if name is None:
name = base_class.__name__
base = {kind: getattr(splitabc, name)
for kind, splitabc in abc.items()}
return {cls._KIND: cls() if inst else cls
for cls in test_util.split_frozen(base_class, base, **kwargs)}
class ABCTestHarness:
@property
def ins(self):
# Lazily set ins on the class.
cls = self.SPLIT[self._KIND]
ins = cls()
self.__class__.ins = ins
return ins
class MetaPathFinder:
def find_module(self, fullname, path):
return super().find_module(fullname, path)
class MetaPathFinderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(MetaPathFinder)
def test_find_module(self):
# Default should return None.
self.assertIsNone(self.ins.find_module('something', None))
def test_invalidate_caches(self):
# Calling the method is a no-op.
self.ins.invalidate_caches()
(Frozen_MPFDefaultTests,
Source_MPFDefaultTests
) = test_util.test_both(MetaPathFinderDefaultsTests)
class PathEntryFinder:
def find_loader(self, fullname):
return super().find_loader(fullname)
class PathEntryFinderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(PathEntryFinder)
def test_find_loader(self):
self.assertEqual((None, []), self.ins.find_loader('something'))
    def test_find_module(self):
self.assertEqual(None, self.ins.find_module('something'))
def test_invalidate_caches(self):
# Should be a no-op.
self.ins.invalidate_caches()
(Frozen_PEFDefaultTests,
Source_PEFDefaultTests
) = test_util.test_both(PathEntryFinderDefaultsTests)
class Loader:
def load_module(self, fullname):
return super().load_module(fullname)
class LoaderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(Loader)
def test_create_module(self):
spec = 'a spec'
self.assertIsNone(self.ins.create_module(spec))
def test_load_module(self):
with self.assertRaises(ImportError):
self.ins.load_module('something')
def test_module_repr(self):
mod = types.ModuleType('blah')
with self.assertRaises(NotImplementedError):
self.ins.module_repr(mod)
original_repr = repr(mod)
mod.__loader__ = self.ins
# Should still return a proper repr.
self.assertTrue(repr(mod))
(Frozen_LDefaultTests,
SourceLDefaultTests
) = test_util.test_both(LoaderDefaultsTests)
class ResourceLoader(Loader):
def get_data(self, path):
return super().get_data(path)
class ResourceLoaderDefaultsTests(ABCTestHarness):
SPLIT = make_abc_subclasses(ResourceLoader)
def test_get_data(self):
with self.assertRaises(IOError):
self.ins.get_data('/some/path')
(Frozen_RLDefaultTests,
Source_RLDefaultTests
) = test_util.test_both(ResourceLoaderDefaultsTests)
class InspectLoader(Loader):
def is_package(self, fullname):
return super().is_package(fullname)
def get_source(self, fullname):
return super().get_source(fullname)
SPLIT_IL = make_abc_subclasses(InspectLoader)
class InspectLoaderDefaultsTests(ABCTestHarness):
SPLIT = SPLIT_IL
def test_is_package(self):
with self.assertRaises(ImportError):
self.ins.is_package('blah')
def test_get_source(self):
with self.assertRaises(ImportError):
self.ins.get_source('blah')
(Frozen_ILDefaultTests,
Source_ILDefaultTests
) = test_util.test_both(InspectLoaderDefaultsTests)
class ExecutionLoader(InspectLoader):
def get_filename(self, fullname):
return super().get_filename(fullname)
SPLIT_EL = make_abc_subclasses(ExecutionLoader)
class ExecutionLoaderDefaultsTests(ABCTestHarness):
SPLIT = SPLIT_EL
def test_get_filename(self):
with self.assertRaises(ImportError):
self.ins.get_filename('blah')
(Frozen_ELDefaultTests,
Source_ELDefaultsTests
 ) = test_util.test_both(ExecutionLoaderDefaultsTests)
##### MetaPathFinder concrete methods ##########################################
class MetaPathFinderFindModuleTests:
@classmethod
def finder(cls, spec):
class MetaPathSpecFinder(cls.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
self.called_for = fullname, path
return spec
return MetaPathSpecFinder()
def test_no_spec(self):
finder = self.finder(None)
path = ['a', 'b', 'c']
name = 'blah'
found = finder.find_module(name, path)
self.assertIsNone(found)
self.assertEqual(name, finder.called_for[0])
self.assertEqual(path, finder.called_for[1])
def test_spec(self):
loader = object()
spec = self.util.spec_from_loader('blah', loader)
finder = self.finder(spec)
found = finder.find_module('blah', None)
self.assertIs(found, spec.loader)
(Frozen_MPFFindModuleTests,
Source_MPFFindModuleTests
) = test_util.test_both(MetaPathFinderFindModuleTests, abc=abc, util=util)
##### PathEntryFinder concrete methods #########################################
class PathEntryFinderFindLoaderTests:
@classmethod
def finder(cls, spec):
class PathEntrySpecFinder(cls.abc.PathEntryFinder):
def find_spec(self, fullname, target=None):
self.called_for = fullname
return spec
return PathEntrySpecFinder()
def test_no_spec(self):
finder = self.finder(None)
name = 'blah'
found = finder.find_loader(name)
self.assertIsNone(found[0])
self.assertEqual([], found[1])
self.assertEqual(name, finder.called_for)
def test_spec_with_loader(self):
loader = object()
spec = self.util.spec_from_loader('blah', loader)
finder = self.finder(spec)
found = finder.find_loader('blah')
self.assertIs(found[0], spec.loader)
def test_spec_with_portions(self):
spec = self.machinery.ModuleSpec('blah', None)
paths = ['a', 'b', 'c']
spec.submodule_search_locations = paths
finder = self.finder(spec)
found = finder.find_loader('blah')
self.assertIsNone(found[0])
self.assertEqual(paths, found[1])
(Frozen_PEFFindLoaderTests,
Source_PEFFindLoaderTests
) = test_util.test_both(PathEntryFinderFindLoaderTests, abc=abc, util=util,
machinery=machinery)
##### Loader concrete methods ##################################################
class LoaderLoadModuleTests:
def loader(self):
class SpecLoader(self.abc.Loader):
found = None
def exec_module(self, module):
self.found = module
def is_package(self, fullname):
"""Force some non-default module state to be set."""
return True
return SpecLoader()
def test_fresh(self):
loader = self.loader()
name = 'blah'
with test_util.uncache(name):
loader.load_module(name)
module = loader.found
self.assertIs(sys.modules[name], module)
self.assertEqual(loader, module.__loader__)
self.assertEqual(loader, module.__spec__.loader)
self.assertEqual(name, module.__name__)
self.assertEqual(name, module.__spec__.name)
self.assertIsNotNone(module.__path__)
self.assertIsNotNone(module.__path__,
module.__spec__.submodule_search_locations)
def test_reload(self):
name = 'blah'
loader = self.loader()
module = types.ModuleType(name)
module.__spec__ = self.util.spec_from_loader(name, loader)
module.__loader__ = loader
with test_util.uncache(name):
sys.modules[name] = module
loader.load_module(name)
found = loader.found
self.assertIs(found, sys.modules[name])
self.assertIs(module, sys.modules[name])
(Frozen_LoaderLoadModuleTests,
Source_LoaderLoadModuleTests
) = test_util.test_both(LoaderLoadModuleTests, abc=abc, util=util)
##### InspectLoader concrete methods ###########################################
class InspectLoaderSourceToCodeTests:
def source_to_module(self, data, path=None):
"""Help with source_to_code() tests."""
module = types.ModuleType('blah')
loader = self.InspectLoaderSubclass()
if path is None:
code = loader.source_to_code(data)
else:
code = loader.source_to_code(data, path)
exec(code, module.__dict__)
return module
def test_source_to_code_source(self):
# Since compile() can handle strings, so should source_to_code().
source = 'attr = 42'
module = self.source_to_module(source)
self.assertTrue(hasattr(module, 'attr'))
self.assertEqual(module.attr, 42)
def test_source_to_code_bytes(self):
# Since compile() can handle bytes, so should source_to_code().
source = b'attr = 42'
module = self.source_to_module(source)
self.assertTrue(hasattr(module, 'attr'))
self.assertEqual(module.attr, 42)
def test_source_to_code_path(self):
# Specifying a path should set it for the code object.
path = 'path/to/somewhere'
loader = self.InspectLoaderSubclass()
code = loader.source_to_code('', path)
self.assertEqual(code.co_filename, path)
def test_source_to_code_no_path(self):
# Not setting a path should still work and be set to <string> since that
# is a pre-existing practice as a default to compile().
loader = self.InspectLoaderSubclass()
code = loader.source_to_code('')
self.assertEqual(code.co_filename, '<string>')
(Frozen_ILSourceToCodeTests,
Source_ILSourceToCodeTests
) = test_util.test_both(InspectLoaderSourceToCodeTests,
InspectLoaderSubclass=SPLIT_IL)
class InspectLoaderGetCodeTests:
def test_get_code(self):
# Test success.
module = types.ModuleType('blah')
with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
mocked.return_value = 'attr = 42'
loader = self.InspectLoaderSubclass()
code = loader.get_code('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
def test_get_code_source_is_None(self):
# If get_source() is None then this should be None.
with mock.patch.object(self.InspectLoaderSubclass, 'get_source') as mocked:
mocked.return_value = None
loader = self.InspectLoaderSubclass()
code = loader.get_code('blah')
self.assertIsNone(code)
def test_get_code_source_not_found(self):
# If there is no source then there is no code object.
loader = self.InspectLoaderSubclass()
with self.assertRaises(ImportError):
loader.get_code('blah')
(Frozen_ILGetCodeTests,
Source_ILGetCodeTests
) = test_util.test_both(InspectLoaderGetCodeTests,
InspectLoaderSubclass=SPLIT_IL)
class InspectLoaderLoadModuleTests:
"""Test InspectLoader.load_module()."""
module_name = 'blah'
def setUp(self):
support.unload(self.module_name)
self.addCleanup(support.unload, self.module_name)
def load(self, loader):
spec = self.util.spec_from_loader(self.module_name, loader)
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
return self.init._bootstrap._load_unlocked(spec)
def mock_get_code(self):
return mock.patch.object(self.InspectLoaderSubclass, 'get_code')
def test_get_code_ImportError(self):
# If get_code() raises ImportError, it should propagate.
with self.mock_get_code() as mocked_get_code:
mocked_get_code.side_effect = ImportError
with self.assertRaises(ImportError):
loader = self.InspectLoaderSubclass()
self.load(loader)
def test_get_code_None(self):
# If get_code() returns None, raise ImportError.
with self.mock_get_code() as mocked_get_code:
mocked_get_code.return_value = None
with self.assertRaises(ImportError):
loader = self.InspectLoaderSubclass()
self.load(loader)
def test_module_returned(self):
# The loaded module should be returned.
code = compile('attr = 42', '<string>', 'exec')
with self.mock_get_code() as mocked_get_code:
mocked_get_code.return_value = code
loader = self.InspectLoaderSubclass()
module = self.load(loader)
self.assertEqual(module, sys.modules[self.module_name])
(Frozen_ILLoadModuleTests,
Source_ILLoadModuleTests
) = test_util.test_both(InspectLoaderLoadModuleTests,
InspectLoaderSubclass=SPLIT_IL,
init=init,
util=util)
##### ExecutionLoader concrete methods #########################################
class ExecutionLoaderGetCodeTests:
def mock_methods(self, *, get_source=False, get_filename=False):
source_mock_context, filename_mock_context = None, None
if get_source:
source_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
'get_source')
if get_filename:
filename_mock_context = mock.patch.object(self.ExecutionLoaderSubclass,
'get_filename')
return source_mock_context, filename_mock_context
def test_get_code(self):
path = 'blah.py'
source_mock_context, filename_mock_context = self.mock_methods(
get_source=True, get_filename=True)
with source_mock_context as source_mock, filename_mock_context as name_mock:
source_mock.return_value = 'attr = 42'
name_mock.return_value = path
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertEqual(code.co_filename, path)
module = types.ModuleType('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
def test_get_code_source_is_None(self):
# If get_source() is None then this should be None.
source_mock_context, _ = self.mock_methods(get_source=True)
with source_mock_context as mocked:
mocked.return_value = None
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertIsNone(code)
def test_get_code_source_not_found(self):
# If there is no source then there is no code object.
loader = self.ExecutionLoaderSubclass()
with self.assertRaises(ImportError):
loader.get_code('blah')
def test_get_code_no_path(self):
# If get_filename() raises ImportError then simply skip setting the path
# on the code object.
source_mock_context, filename_mock_context = self.mock_methods(
get_source=True, get_filename=True)
with source_mock_context as source_mock, filename_mock_context as name_mock:
source_mock.return_value = 'attr = 42'
name_mock.side_effect = ImportError
loader = self.ExecutionLoaderSubclass()
code = loader.get_code('blah')
self.assertEqual(code.co_filename, '<string>')
module = types.ModuleType('blah')
exec(code, module.__dict__)
self.assertEqual(module.attr, 42)
(Frozen_ELGetCodeTests,
Source_ELGetCodeTests
) = test_util.test_both(ExecutionLoaderGetCodeTests,
ExecutionLoaderSubclass=SPLIT_EL)
##### SourceLoader concrete methods ############################################
class SourceOnlyLoader:
# Globals that should be defined for all modules.
source = (b"_ = '::'.join([__name__, __file__, __cached__, __package__, "
b"repr(__loader__)])")
def __init__(self, path):
self.path = path
def get_data(self, path):
if path != self.path:
raise IOError
return self.source
def get_filename(self, fullname):
return self.path
def module_repr(self, module):
return '<module>'
SPLIT_SOL = make_abc_subclasses(SourceOnlyLoader, 'SourceLoader')
class SourceLoader(SourceOnlyLoader):
source_mtime = 1
def __init__(self, path, magic=None):
super().__init__(path)
self.bytecode_path = self.util.cache_from_source(self.path)
self.source_size = len(self.source)
if magic is None:
magic = self.util.MAGIC_NUMBER
data = bytearray(magic)
data.extend(self.init._w_long(self.source_mtime))
data.extend(self.init._w_long(self.source_size))
code_object = compile(self.source, self.path, 'exec',
dont_inherit=True)
data.extend(marshal.dumps(code_object))
self.bytecode = bytes(data)
self.written = {}
def get_data(self, path):
if path == self.path:
return super().get_data(path)
elif path == self.bytecode_path:
return self.bytecode
else:
raise OSError
def path_stats(self, path):
if path != self.path:
raise IOError
return {'mtime': self.source_mtime, 'size': self.source_size}
def set_data(self, path, data):
self.written[path] = bytes(data)
return path == self.bytecode_path
SPLIT_SL = make_abc_subclasses(SourceLoader, util=util, init=init)
class SourceLoaderTestHarness:
def setUp(self, *, is_package=True, **kwargs):
self.package = 'pkg'
if is_package:
self.path = os.path.join(self.package, '__init__.py')
self.name = self.package
else:
module_name = 'mod'
self.path = os.path.join(self.package, '.'.join(['mod', 'py']))
self.name = '.'.join([self.package, module_name])
self.cached = self.util.cache_from_source(self.path)
self.loader = self.loader_mock(self.path, **kwargs)
def verify_module(self, module):
self.assertEqual(module.__name__, self.name)
self.assertEqual(module.__file__, self.path)
self.assertEqual(module.__cached__, self.cached)
self.assertEqual(module.__package__, self.package)
self.assertEqual(module.__loader__, self.loader)
values = module._.split('::')
self.assertEqual(values[0], self.name)
self.assertEqual(values[1], self.path)
self.assertEqual(values[2], self.cached)
self.assertEqual(values[3], self.package)
self.assertEqual(values[4], repr(self.loader))
def verify_code(self, code_object):
module = types.ModuleType(self.name)
module.__file__ = self.path
module.__cached__ = self.cached
module.__package__ = self.package
module.__loader__ = self.loader
module.__path__ = []
exec(code_object, module.__dict__)
self.verify_module(module)
class SourceOnlyLoaderTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader for source-only loading.
Reload testing is subsumed by the tests for
importlib.util.module_for_loader.
"""
def test_get_source(self):
# Verify the source code is returned as a string.
# If an OSError is raised by get_data then raise ImportError.
expected_source = self.loader.source.decode('utf-8')
self.assertEqual(self.loader.get_source(self.name), expected_source)
def raise_OSError(path):
raise OSError
self.loader.get_data = raise_OSError
with self.assertRaises(ImportError) as cm:
self.loader.get_source(self.name)
self.assertEqual(cm.exception.name, self.name)
def test_is_package(self):
# Properly detect when loading a package.
self.setUp(is_package=False)
self.assertFalse(self.loader.is_package(self.name))
self.setUp(is_package=True)
self.assertTrue(self.loader.is_package(self.name))
self.assertFalse(self.loader.is_package(self.name + '.__init__'))
def test_get_code(self):
# Verify the code object is created.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_source_to_code(self):
# Verify the compiled code object.
code = self.loader.source_to_code(self.loader.source, self.path)
self.verify_code(code)
def test_load_module(self):
# Loading a module should set __name__, __loader__, __package__,
# __path__ (for packages), __file__, and __cached__.
# The module should also be put into sys.modules.
with test_util.uncache(self.name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertEqual(module.__path__, [os.path.dirname(self.path)])
self.assertIn(self.name, sys.modules)
def test_package_settings(self):
# __package__ needs to be set, while __path__ is set on if the module
# is a package.
# Testing the values for a package are covered by test_load_module.
self.setUp(is_package=False)
with test_util.uncache(self.name):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
module = self.loader.load_module(self.name)
self.verify_module(module)
self.assertFalse(hasattr(module, '__path__'))
def test_get_source_encoding(self):
# Source is considered encoded in UTF-8 by default unless otherwise
# specified by an encoding line.
source = "_ = 'ü'"
self.loader.source = source.encode('utf-8')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
source = "# coding: latin-1\n_ = ü"
self.loader.source = source.encode('latin-1')
returned_source = self.loader.get_source(self.name)
self.assertEqual(returned_source, source)
(Frozen_SourceOnlyLoaderTests,
Source_SourceOnlyLoaderTests
) = test_util.test_both(SourceOnlyLoaderTests, util=util,
loader_mock=SPLIT_SOL)
@unittest.skipIf(sys.dont_write_bytecode, "sys.dont_write_bytecode is true")
class SourceLoaderBytecodeTests(SourceLoaderTestHarness):
"""Test importlib.abc.SourceLoader's use of bytecode.
Source-only testing handled by SourceOnlyLoaderTests.
"""
def verify_code(self, code_object, *, bytecode_written=False):
super().verify_code(code_object)
if bytecode_written:
self.assertIn(self.cached, self.loader.written)
data = bytearray(self.util.MAGIC_NUMBER)
data.extend(self.init._w_long(self.loader.source_mtime))
data.extend(self.init._w_long(self.loader.source_size))
data.extend(marshal.dumps(code_object))
self.assertEqual(self.loader.written[self.cached], bytes(data))
def test_code_with_everything(self):
# When everything should work.
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
def test_no_bytecode(self):
# If no bytecode exists then move on to the source.
self.loader.bytecode_path = "<does not exist>"
# Sanity check
with self.assertRaises(OSError):
bytecode_path = self.util.cache_from_source(self.path)
self.loader.get_data(bytecode_path)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_code_bad_timestamp(self):
# Bytecode is only used when the timestamp matches the source EXACTLY.
for source_mtime in (0, 2):
assert source_mtime != self.loader.source_mtime
original = self.loader.source_mtime
self.loader.source_mtime = source_mtime
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
self.loader.source_mtime = original
def test_code_bad_magic(self):
# Skip over bytecode with a bad magic number.
self.setUp(magic=b'0000')
# If bytecode is used then EOFError would be raised by marshal.
self.loader.bytecode = self.loader.bytecode[8:]
code_object = self.loader.get_code(self.name)
self.verify_code(code_object, bytecode_written=True)
def test_dont_write_bytecode(self):
# Bytecode is not written if sys.dont_write_bytecode is true.
# Can assume it is false already thanks to the skipIf class decorator.
try:
sys.dont_write_bytecode = True
self.loader.bytecode_path = "<does not exist>"
code_object = self.loader.get_code(self.name)
self.assertNotIn(self.cached, self.loader.written)
finally:
sys.dont_write_bytecode = False
def test_no_set_data(self):
# If set_data is not defined, one can still read bytecode.
self.setUp(magic=b'0000')
original_set_data = self.loader.__class__.mro()[1].set_data
try:
del self.loader.__class__.mro()[1].set_data
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
finally:
self.loader.__class__.mro()[1].set_data = original_set_data
def test_set_data_raises_exceptions(self):
# Raising NotImplementedError or OSError is okay for set_data.
def raise_exception(exc):
def closure(*args, **kwargs):
raise exc
return closure
self.setUp(magic=b'0000')
self.loader.set_data = raise_exception(NotImplementedError)
code_object = self.loader.get_code(self.name)
self.verify_code(code_object)
(Frozen_SLBytecodeTests,
SourceSLBytecodeTests
) = test_util.test_both(SourceLoaderBytecodeTests, init=init, util=util,
loader_mock=SPLIT_SL)
class SourceLoaderGetSourceTests:
"""Tests for importlib.abc.SourceLoader.get_source()."""
def test_default_encoding(self):
# Should have no problems with UTF-8 text.
name = 'mod'
mock = self.SourceOnlyLoaderMock('mod.file')
source = 'x = "ü"'
mock.source = source.encode('utf-8')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_decoded_source(self):
# Decoding should work.
name = 'mod'
mock = self.SourceOnlyLoaderMock("mod.file")
source = "# coding: Latin-1\nx='ü'"
assert source.encode('latin-1') != source.encode('utf-8')
mock.source = source.encode('latin-1')
returned_source = mock.get_source(name)
self.assertEqual(returned_source, source)
def test_universal_newlines(self):
# PEP 302 says universal newlines should be used.
name = 'mod'
mock = self.SourceOnlyLoaderMock('mod.file')
source = "x = 42\r\ny = -13\r\n"
mock.source = source.encode('utf-8')
expect = io.IncrementalNewlineDecoder(None, True).decode(source)
self.assertEqual(mock.get_source(name), expect)
(Frozen_SourceOnlyLoaderGetSourceTests,
Source_SourceOnlyLoaderGetSourceTests
) = test_util.test_both(SourceLoaderGetSourceTests,
SourceOnlyLoaderMock=SPLIT_SOL)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
664688d7026deca7dfe13f826be489819a058db9 | f188379dc9c1e5b63e432d434c782a4d6997872b | /7_Dictionaries/Exercises and More Exercises/11. Ranking.py | c8393f218b168b6acce06f6453a3b75ba56c95c2 | [] | no_license | GalyaBorislavova/SoftUni_Python_Fundamentals_January_2021 | 39d7eb8c28f60ff3c293855b074c49ac622a6036 | 7d479fd6c8e4136fb07b765458cc00088e09767a | refs/heads/main | 2023-06-15T04:16:17.084825 | 2021-06-30T18:05:42 | 2021-06-30T18:05:42 | 381,785,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,479 | py | data = input()
contest_with_pass = {}
while not data == "end of contests":
contest, password = data.split(":")
contest_with_pass[contest] = password
data = input()
data = input()
submissions = {}
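# submissions maps username -> {contest: best score achieved in that contest}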
while not data == "end of submissions":
contest, password, username, points = data.split("=>")
points = int(points)
if contest in contest_with_pass and contest_with_pass[contest] == password:
        if username not in submissions:
            submissions[username] = {}
        if contest not in submissions[username] or submissions[username][contest] < points:
            submissions[username][contest] = points
data = input()
sorted_submissions = {n: v for n, v in (sorted(submissions.items()))}
for key, value in sorted_submissions.items():
sorted_points = {k: p for k, p in sorted(value.items(), key=lambda x: -x[1])}
sorted_submissions[key] = sorted_points
max_points = 0
best_candidate = ''
for key, value in sorted_submissions.items():
current_points = 0
for c, p in value.items():
current_points += p
if current_points > max_points:
max_points = current_points
best_candidate = key
print(f"Best candidate is {best_candidate} with total {max_points} points.")
print("Ranking:")
for key, value in sorted_submissions.items():
print(key)
for c, p in value.items():
print(f"# {c} -> {p}") | [
"[email protected]"
] | |
d1c81615bd1d6e2713ae67c7f872c618bd7c96c9 | 174ad2b84041ee9dface6f822737cd30e39aa04d | /toRRentSeaRcher/Enviroments/Scripts/pip3.7-script.py | d3e3a76c9c075f0b26ffd2ab609440029926da9b | [] | no_license | duk1edev/projects | 88f26830ac16f0fc889cace8ffab756788ee0b24 | 74bdea8b56bee9e1c0d8a78405ad0ca7bad38543 | refs/heads/master | 2020-06-24T17:15:17.891008 | 2019-11-29T17:23:54 | 2019-11-29T17:23:54 | 199,027,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | #!C:\Users\duk1e\Desktop\toRRentSeaRcher\Enviroments\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.7'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.7')()
)
| [
"[email protected]"
] | |
60be12b528080a1558cd6e0f9d842020db02dc29 | 5a1a695829a2d1dbf4daa0736f0fbd6feffc7e63 | /0830/palindrome2.py | 0042014807add9a1a311863e8d36a88cf927c9d9 | [] | no_license | juyi212/Algorithm_study | f5d263c5329c994a457bbe897e5e1405d2b1d67a | f225cc593a50b74686111f654f7133707a1d1310 | refs/heads/master | 2023-03-21T20:02:36.138688 | 2021-03-16T14:16:40 | 2021-03-16T14:16:40 | 325,008,034 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 648 | py | import sys
sys.stdin=open("pal_input.txt","r")
def check(m):
global zwords
for i in range(100):
for j in range(100-m+1):
temp=words[i][j:j+m]
c_temp=zwords[i][j:j+m]
if temp==temp[::-1] or c_temp==c_temp[::-1]:
return True
return False
for tc in range(1,11):
t=int(input())
words=[list(input().rstrip()) for _ in range(100)]
zwords=list(zip(*words))
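    # transpose the grid so check() can scan columns (vertical palindromes) too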
for i in range(100,0,-1):
        if check(i): # we want the longest palindrome, so try lengths from longest to shortest
result=i
break
    print('#{} {}'.format(t, result))  # assumed SWEA "#case answer" format; t is the case number read above
| [
"[email protected]"
] | |
4a7aae79d4dfa0b6ec639291564a1c9cc372c6c2 | 54f352a242a8ad6ff5516703e91da61e08d9a9e6 | /Source Codes/AtCoder/abc005/C/4909327.py | 2ce013dfa84bbabf437e5d6940c4493bb010db06 | [] | no_license | Kawser-nerd/CLCDSA | 5cbd8a4c3f65173e4e8e0d7ed845574c4770c3eb | aee32551795763b54acb26856ab239370cac4e75 | refs/heads/master | 2022-02-09T11:08:56.588303 | 2022-01-26T18:53:40 | 2022-01-26T18:53:40 | 211,783,197 | 23 | 9 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | t = int(input())
n = int(input())
a_list = sorted(list(map(int, input().split())))
m = int(input())
b_list = sorted(list(map(int, input().split())))
if n < m:
print('no')
else:
count = 0
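    # greedy match: both lists are sorted, so pairing each b with the smallest
    # remaining a that satisfies 0 <= b - a <= t never blocks a later b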
for b in b_list:
for a in range(len(a_list)):
if b-a_list[a] <= t and b-a_list[a] >= 0:
count += 1
a_list.pop(a)
break
if count == len(b_list):
print('yes')
else:
print('no') | [
"[email protected]"
] | |
644a771174d5da427ab20d060a04b6064d28e480 | 7aec5d22b50ce8d4a18572396a4ab28d45dfcbef | /examples/coreir-tutorial/shift_register.py | 668484a02603c8bb433e6a21b40527233274e552 | [
"MIT"
] | permissive | phanrahan/magmathon | db4ad04bd37034d5cee9ee0b507ec64ca9d0f204 | 68c9be1df0569a9d5d076b1bd986ed5ee3562d54 | refs/heads/master | 2020-09-15T08:16:36.985371 | 2020-03-04T17:50:27 | 2020-03-04T17:50:27 | 66,949,165 | 13 | 3 | MIT | 2020-03-02T18:14:35 | 2016-08-30T14:39:43 | Jupyter Notebook | UTF-8 | Python | false | false | 1,654 | py |
# coding: utf-8
# In[1]:
import magma as m
m.set_mantle_target("coreir")
import mantle
def DefineShiftRegister(n, init=0, has_ce=False, has_reset=False):
class _ShiftRegister(m.Circuit):
name = 'ShiftRegister_{}_{}_{}_{}'.format(n, init, has_ce, has_reset)
IO = ['I', m.In(m.Bit), 'O', m.Out(m.Bit)] + m.ClockInterface(has_ce, has_reset)
@classmethod
def definition(siso):
ffs = mantle.FFs(n, init=init, has_ce=has_ce, has_reset=has_reset)
reg = m.braid(ffs, foldargs={"I":"O"})
reg(siso.I)
m.wire(reg.O, siso.O)
m.wireclock(siso, reg)
return _ShiftRegister
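
# Note (added for clarity; my reading of the mantle/magma API): braid(..., foldargs={"I": "O"})
# chains the flip-flops by feeding each stage's O output into the next stage's I input,
# which is what turns N independent FFs into a serial-in/serial-out shift register.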
# In[2]:
m.compile("build/DefineShiftRegister.json", DefineShiftRegister(2, has_ce=True), output="coreir")
get_ipython().magic('cat build/DefineShiftRegister.json')
# In[3]:
from magma.simulator.coreir_simulator import CoreIRSimulator
from bit_vector import BitVector
N = 3
ShiftRegisterNCE = DefineShiftRegister(N, has_ce=True)
simulator = CoreIRSimulator(ShiftRegisterNCE, clock=ShiftRegisterNCE.CLK)
outputs = []
for j in range(2):
simulator.advance()
for I, enable in [(1, 1), (0, 1), (1, 1), (0, 1), (1, 0), (0, 0), (1, 1), (1, 1), (1, 1), (1, 1)]:
simulator.set_value(ShiftRegisterNCE.I, bool(I))
simulator.set_value(ShiftRegisterNCE.CE, bool(enable))
for j in range(2):
simulator.advance()
O = simulator.get_value(ShiftRegisterNCE.O)
CLK = simulator.get_value(ShiftRegisterNCE.CLK)
outputs.append([O, I, enable, CLK])
# In[4]:
from magma.waveform import waveform
waveform(outputs, ["O", "I", "CE", "CLK"])
| [
"[email protected]"
] | |
9aaa1431d7deb1edfcc6b43a72e07036fdc7127d | 72863e7278f4be8b5d63d999144f9eaec3e7ec48 | /venv/lib/python2.7/site-packages/git/util.py | 1dbbd35de70c5055b20ce511be046584c5cf97b0 | [
"MIT"
] | permissive | sravani-m/Web-Application-Security-Framework | 6e484b6c8642f47dac94e67b657a92fd0dbb6412 | d9f71538f5cba6fe1d8eabcb26c557565472f6a6 | refs/heads/master | 2020-04-26T11:54:01.334566 | 2019-05-03T19:17:30 | 2019-05-03T19:17:30 | 173,532,718 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,135 | py | # utils.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import contextlib
from functools import wraps
import getpass
import logging
import os
import platform
import subprocess
import re
import shutil
import stat
import time
try:
from unittest import SkipTest
except ImportError:
from unittest2 import SkipTest
from gitdb.util import (# NOQA @IgnorePep8
make_sha,
LockedFD, # @UnusedImport
file_contents_ro, # @UnusedImport
file_contents_ro_filepath, # @UnusedImport
LazyMixin, # @UnusedImport
to_hex_sha, # @UnusedImport
to_bin_sha, # @UnusedImport
bin_to_hex, # @UnusedImport
hex_to_bin, # @UnusedImport
)
from git.compat import is_win
import os.path as osp
from .compat import (
MAXSIZE,
defenc,
PY3
)
from .exc import InvalidGitRepositoryError
# NOTE: Some of the unused imports might be used/imported by others.
# Handle once test-cases are back up and running.
# Most of these are unused here, but are for use by git-python modules so these
# don't see gitdb all the time. Flake of course doesn't like it.
__all__ = ("stream_copy", "join_path", "to_native_path_windows", "to_native_path_linux",
"join_path_native", "Stats", "IndexFileSHA1Writer", "Iterable", "IterableList",
"BlockingLockFile", "LockFile", 'Actor', 'get_user_id', 'assure_directory_exists',
'RemoteProgress', 'CallableRemoteProgress', 'rmtree', 'unbare_repo',
'HIDE_WINDOWS_KNOWN_ERRORS')
log = logging.getLogger(__name__)
#: We need an easy way to see if Appveyor TCs start failing,
#: so the errors marked with this var are considered "acknowledged" ones, awaiting remedy,
#: till then, we wish to hide them.
HIDE_WINDOWS_KNOWN_ERRORS = is_win and os.environ.get('HIDE_WINDOWS_KNOWN_ERRORS', True)
HIDE_WINDOWS_FREEZE_ERRORS = is_win and os.environ.get('HIDE_WINDOWS_FREEZE_ERRORS', True)
#{ Utility Methods
def unbare_repo(func):
"""Methods with this decorator raise InvalidGitRepositoryError if they
encounter a bare repository"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self.repo.bare:
raise InvalidGitRepositoryError("Method '%s' cannot operate on bare repositories" % func.__name__)
# END bare method
return func(self, *args, **kwargs)
# END wrapper
return wrapper
@contextlib.contextmanager
def cwd(new_dir):
old_dir = os.getcwd()
os.chdir(new_dir)
try:
yield new_dir
finally:
os.chdir(old_dir)
def rmtree(path):
"""Remove the given recursively.
:note: we use shutil rmtree but adjust its behaviour to see whether files that
couldn't be deleted are read-only. Windows will not remove them in that case"""
def onerror(func, path, exc_info):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
try:
func(path) # Will scream if still not possible to delete.
except Exception as ex:
if HIDE_WINDOWS_KNOWN_ERRORS:
                raise SkipTest("FIXME: fails with: PermissionError\n %s" % ex)
else:
raise
return shutil.rmtree(path, False, onerror)
def rmfile(path):
"""Ensure file deleted also on *Windows* where read-only files need special treatment."""
if osp.isfile(path):
if is_win:
os.chmod(path, 0o777)
os.remove(path)
def stream_copy(source, destination, chunk_size=512 * 1024):
"""Copy all data from the source stream into the destination stream in chunks
of size chunk_size
:return: amount of bytes written"""
br = 0
while True:
chunk = source.read(chunk_size)
destination.write(chunk)
br += len(chunk)
if len(chunk) < chunk_size:
break
# END reading output stream
return br
def join_path(a, *p):
"""Join path tokens together similar to osp.join, but always use
'/' instead of possibly '\' on windows."""
path = a
for b in p:
if len(b) == 0:
continue
if b.startswith('/'):
path += b[1:]
elif path == '' or path.endswith('/'):
path += b
else:
path += '/' + b
# END for each path token to add
return path
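
# e.g. join_path('refs', 'heads', 'master') == 'refs/heads/master' -- the
# separator is always '/', even on Windows.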
if is_win:
def to_native_path_windows(path):
return path.replace('/', '\\')
def to_native_path_linux(path):
return path.replace('\\', '/')
to_native_path = to_native_path_windows
else:
# no need for any work on linux
def to_native_path_linux(path):
return path
to_native_path = to_native_path_linux
def join_path_native(a, *p):
"""
As join path, but makes sure an OS native path is returned. This is only
needed to play it safe on my dear windows and to assure nice paths that only
use '\'"""
return to_native_path(join_path(a, *p))
def assure_directory_exists(path, is_file=False):
"""Assure that the directory pointed to by path exists.
:param is_file: If True, path is assumed to be a file and handled correctly.
Otherwise it must be a directory
:return: True if the directory was created, False if it already existed"""
if is_file:
path = osp.dirname(path)
# END handle file
if not osp.isdir(path):
os.makedirs(path)
return True
return False
def _get_exe_extensions():
try:
winprog_exts = tuple(p.upper() for p in os.environ['PATHEXT'].split(os.pathsep))
except:
winprog_exts = ('.BAT', 'COM', '.EXE')
return winprog_exts
def py_where(program, path=None):
# From: http://stackoverflow.com/a/377028/548792
try:
winprog_exts = tuple(p.upper() for p in os.environ['PATHEXT'].split(os.pathsep))
except:
winprog_exts = is_win and ('.BAT', 'COM', '.EXE') or ()
def is_exec(fpath):
return osp.isfile(fpath) and os.access(fpath, os.X_OK) and (
os.name != 'nt' or not winprog_exts or any(fpath.upper().endswith(ext)
for ext in winprog_exts))
progs = []
if not path:
path = os.environ["PATH"]
for folder in path.split(os.pathsep):
folder = folder.strip('"')
if folder:
exe_path = osp.join(folder, program)
for f in [exe_path] + ['%s%s' % (exe_path, e) for e in winprog_exts]:
if is_exec(f):
progs.append(f)
return progs
def _cygexpath(drive, path):
if osp.isabs(path) and not drive:
## Invoked from `cygpath()` directly with `D:Apps\123`?
        #  It's an error; leave the path alone (slashes are normalized on return).
p = path
else:
p = path and osp.normpath(osp.expandvars(osp.expanduser(path)))
if osp.isabs(p):
if drive:
# Confusing, maybe a remote system should expand vars.
p = path
else:
p = cygpath(p)
elif drive:
p = '/cygdrive/%s/%s' % (drive.lower(), p)
return p.replace('\\', '/')
_cygpath_parsers = (
## See: https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
## and: https://www.cygwin.com/cygwin-ug-net/using.html#unc-paths
(re.compile(r"\\\\\?\\UNC\\([^\\]+)\\([^\\]+)(?:\\(.*))?"),
(lambda server, share, rest_path: '//%s/%s/%s' % (server, share, rest_path.replace('\\', '/'))),
False
),
(re.compile(r"\\\\\?\\(\w):[/\\](.*)"),
_cygexpath,
False
),
(re.compile(r"(\w):[/\\](.*)"),
_cygexpath,
False
),
(re.compile(r"file:(.*)", re.I),
(lambda rest_path: rest_path),
True),
(re.compile(r"(\w{2,}:.*)"), # remote URL, do nothing
(lambda url: url),
False),
)
def cygpath(path):
"""Use :meth:`git.cmd.Git.polish_url()` instead, that works on any environment."""
if not path.startswith(('/cygdrive', '//')):
for regex, parser, recurse in _cygpath_parsers:
match = regex.match(path)
if match:
path = parser(*match.groups())
if recurse:
path = cygpath(path)
break
else:
path = _cygexpath(None, path)
return path
_decygpath_regex = re.compile(r"/cygdrive/(\w)(/.*)?")
def decygpath(path):
m = _decygpath_regex.match(path)
if m:
drive, rest_path = m.groups()
path = '%s:%s' % (drive.upper(), rest_path or '')
return path.replace('/', '\\')
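
# Illustrative round-trip (Windows semantics; shown here only as documentation):
#   cygpath('C:\\Users')            -> '/cygdrive/c/Users'
#   decygpath('/cygdrive/c/Users')  -> 'C:\\Users'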
#: Store boolean flags denoting if a specific Git executable
#: is from a Cygwin installation (since `cache_lru()` unsupported on PY2).
_is_cygwin_cache = {}
def is_cygwin_git(git_executable):
if not is_win:
return False
#from subprocess import check_output
is_cygwin = _is_cygwin_cache.get(git_executable)
if is_cygwin is None:
is_cygwin = False
try:
git_dir = osp.dirname(git_executable)
if not git_dir:
res = py_where(git_executable)
git_dir = osp.dirname(res[0]) if res else None
## Just a name given, not a real path.
uname_cmd = osp.join(git_dir, 'uname')
process = subprocess.Popen([uname_cmd], stdout=subprocess.PIPE,
universal_newlines=True)
uname_out, _ = process.communicate()
#retcode = process.poll()
is_cygwin = 'CYGWIN' in uname_out
except Exception as ex:
log.debug('Failed checking if running in CYGWIN due to: %r', ex)
_is_cygwin_cache[git_executable] = is_cygwin
return is_cygwin
def get_user_id():
""":return: string identifying the currently active system user as name@node"""
return "%s@%s" % (getpass.getuser(), platform.node())
def finalize_process(proc, **kwargs):
"""Wait for the process (clone, fetch, pull or push) and handle its errors accordingly"""
## TODO: No close proc-streams??
proc.wait(**kwargs)
#} END utilities
#{ Classes
class RemoteProgress(object):
"""
Handler providing an interface to parse progress information emitted by git-push
and git-fetch and to dispatch callbacks allowing subclasses to react to the progress.
"""
_num_op_codes = 9
BEGIN, END, COUNTING, COMPRESSING, WRITING, RECEIVING, RESOLVING, FINDING_SOURCES, CHECKING_OUT = \
[1 << x for x in range(_num_op_codes)]
STAGE_MASK = BEGIN | END
OP_MASK = ~STAGE_MASK
DONE_TOKEN = 'done.'
TOKEN_SEPARATOR = ', '
__slots__ = ('_cur_line',
'_seen_ops',
'error_lines', # Lines that started with 'error:' or 'fatal:'.
                 'other_lines')  # Lines not denoting progress (e.g. push-infos).
re_op_absolute = re.compile(r"(remote: )?([\w\s]+):\s+()(\d+)()(.*)")
re_op_relative = re.compile(r"(remote: )?([\w\s]+):\s+(\d+)% \((\d+)/(\d+)\)(.*)")
def __init__(self):
self._seen_ops = list()
self._cur_line = None
self.error_lines = []
self.other_lines = []
def _parse_progress_line(self, line):
"""Parse progress information from the given line as retrieved by git-push
or git-fetch.
- Lines that do not contain progress info are stored in :attr:`other_lines`.
- Lines that seem to contain an error (i.e. start with error: or fatal:) are stored
in :attr:`error_lines`.
:return: list(line, ...) list of lines that could not be processed"""
# handle
# Counting objects: 4, done.
# Compressing objects: 50% (1/2) \rCompressing objects: 100% (2/2) \rCompressing objects: 100% (2/2), done.
self._cur_line = line
if len(self.error_lines) > 0 or self._cur_line.startswith(('error:', 'fatal:')):
self.error_lines.append(self._cur_line)
return []
sub_lines = line.split('\r')
failed_lines = list()
for sline in sub_lines:
# find escape characters and cut them away - regex will not work with
# them as they are non-ascii. As git might expect a tty, it will send them
last_valid_index = None
for i, c in enumerate(reversed(sline)):
if ord(c) < 32:
                    # it's a slice index
last_valid_index = -i - 1
# END character was non-ascii
# END for each character in sline
if last_valid_index is not None:
sline = sline[:last_valid_index]
# END cut away invalid part
sline = sline.rstrip()
cur_count, max_count = None, None
match = self.re_op_relative.match(sline)
if match is None:
match = self.re_op_absolute.match(sline)
if not match:
self.line_dropped(sline)
failed_lines.append(sline)
continue
# END could not get match
op_code = 0
remote, op_name, percent, cur_count, max_count, message = match.groups() # @UnusedVariable
# get operation id
if op_name == "Counting objects":
op_code |= self.COUNTING
elif op_name == "Compressing objects":
op_code |= self.COMPRESSING
elif op_name == "Writing objects":
op_code |= self.WRITING
elif op_name == 'Receiving objects':
op_code |= self.RECEIVING
elif op_name == 'Resolving deltas':
op_code |= self.RESOLVING
elif op_name == 'Finding sources':
op_code |= self.FINDING_SOURCES
elif op_name == 'Checking out files':
op_code |= self.CHECKING_OUT
else:
# Note: On windows it can happen that partial lines are sent
# Hence we get something like "CompreReceiving objects", which is
# a blend of "Compressing objects" and "Receiving objects".
# This can't really be prevented, so we drop the line verbosely
# to make sure we get informed in case the process spits out new
# commands at some point.
self.line_dropped(sline)
# Note: Don't add this line to the failed lines, as we have to silently
# drop it
self.other_lines.extend(failed_lines)
return failed_lines
# END handle op code
# figure out stage
if op_code not in self._seen_ops:
self._seen_ops.append(op_code)
op_code |= self.BEGIN
# END begin opcode
if message is None:
message = ''
# END message handling
message = message.strip()
if message.endswith(self.DONE_TOKEN):
op_code |= self.END
message = message[:-len(self.DONE_TOKEN)]
# END end message handling
message = message.strip(self.TOKEN_SEPARATOR)
self.update(op_code,
cur_count and float(cur_count),
max_count and float(max_count),
message)
# END for each sub line
self.other_lines.extend(failed_lines)
return failed_lines
def new_message_handler(self):
"""
:return:
a progress handler suitable for handle_process_output(), passing lines on to this Progress
handler in a suitable format"""
def handler(line):
return self._parse_progress_line(line.rstrip())
# end
return handler
def line_dropped(self, line):
"""Called whenever a line could not be understood and was therefore dropped."""
pass
def update(self, op_code, cur_count, max_count=None, message=''):
"""Called whenever the progress changes
:param op_code:
Integer allowing to be compared against Operation IDs and stage IDs.
Stage IDs are BEGIN and END. BEGIN will only be set once for each Operation
ID as well as END. It may be that BEGIN and END are set at once in case only
one progress message was emitted due to the speed of the operation.
Between BEGIN and END, none of these flags will be set
Operation IDs are all held within the OP_MASK. Only one Operation ID will
be active per call.
:param cur_count: Current absolute count of items
:param max_count:
The maximum count of items we expect. It may be None in case there is
no maximum number of items or if it is (yet) unknown.
:param message:
In case of the 'WRITING' operation, it contains the amount of bytes
transferred. It may possibly be used for other purposes as well.
You may read the contents of the current line in self._cur_line"""
pass
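

# Minimal usage sketch (added for illustration; not part of upstream GitPython):
# subclass RemoteProgress and override update() to observe parsed progress. An
# instance of such a subclass is typically passed as the `progress` argument of
# clone/fetch/push operations.
class _ExamplePrintingProgress(RemoteProgress):
    """Example subclass: forward every parsed progress update to stdout."""

    def update(self, op_code, cur_count, max_count=None, message=''):
        print(op_code, cur_count, max_count, message)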
class CallableRemoteProgress(RemoteProgress):
"""An implementation forwarding updates to any callable"""
__slots__ = ('_callable')
def __init__(self, fn):
self._callable = fn
super(CallableRemoteProgress, self).__init__()
def update(self, *args, **kwargs):
self._callable(*args, **kwargs)
class Actor(object):
"""Actors hold information about a person acting on the repository. They
can be committers and authors or anything with a name and an email as
mentioned in the git log entries."""
# PRECOMPILED REGEX
name_only_regex = re.compile(r'<(.+)>')
name_email_regex = re.compile(r'(.*) <(.+?)>')
# ENVIRONMENT VARIABLES
# read when creating new commits
env_author_name = "GIT_AUTHOR_NAME"
env_author_email = "GIT_AUTHOR_EMAIL"
env_committer_name = "GIT_COMMITTER_NAME"
env_committer_email = "GIT_COMMITTER_EMAIL"
# CONFIGURATION KEYS
conf_name = 'name'
conf_email = 'email'
__slots__ = ('name', 'email')
def __init__(self, name, email):
self.name = name
self.email = email
def __eq__(self, other):
return self.name == other.name and self.email == other.email
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash((self.name, self.email))
def __str__(self):
return self.name
def __repr__(self):
return u'<git.Actor "%s <%s>">' % (self.name, self.email)
@classmethod
def _from_string(cls, string):
"""Create an Actor from a string.
:param string: is the string, which is expected to be in regular git format
John Doe <[email protected]>
:return: Actor """
m = cls.name_email_regex.search(string)
if m:
name, email = m.groups()
return Actor(name, email)
else:
m = cls.name_only_regex.search(string)
if m:
return Actor(m.group(1), None)
else:
# assume best and use the whole string as name
return Actor(string, None)
# END special case name
# END handle name/email matching
@classmethod
def _main_actor(cls, env_name, env_email, config_reader=None):
actor = Actor('', '')
default_email = get_user_id()
default_name = default_email.split('@')[0]
for attr, evar, cvar, default in (('name', env_name, cls.conf_name, default_name),
('email', env_email, cls.conf_email, default_email)):
try:
val = os.environ[evar]
if not PY3:
val = val.decode(defenc)
# end assure we don't get 'invalid strings'
setattr(actor, attr, val)
except KeyError:
if config_reader is not None:
setattr(actor, attr, config_reader.get_value('user', cvar, default))
# END config-reader handling
if not getattr(actor, attr):
setattr(actor, attr, default)
# END handle name
# END for each item to retrieve
return actor
@classmethod
def committer(cls, config_reader=None):
"""
:return: Actor instance corresponding to the configured committer. It behaves
similar to the git implementation, such that the environment will override
configuration values of config_reader. If no value is set at all, it will be
generated
:param config_reader: ConfigReader to use to retrieve the values from in case
they are not set in the environment"""
return cls._main_actor(cls.env_committer_name, cls.env_committer_email, config_reader)
@classmethod
def author(cls, config_reader=None):
"""Same as committer(), but defines the main author. It may be specified in the environment,
but defaults to the committer"""
return cls._main_actor(cls.env_author_name, cls.env_author_email, config_reader)
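
# Parsing examples (illustrative):
#   Actor._from_string('John Doe <[email protected]>') -> Actor('John Doe', '[email protected]')
#   Actor._from_string('John Doe')                 -> Actor('John Doe', None)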
class Stats(object):
"""
Represents stat information as presented by git at the end of a merge. It is
created from the output of a diff operation.
``Example``::
c = Commit( sha1 )
s = c.stats
s.total # full-stat-dict
s.files # dict( filepath : stat-dict )
``stat-dict``
A dictionary with the following keys and values::
deletions = number of deleted lines as int
insertions = number of inserted lines as int
lines = total number of lines changed as int, or deletions + insertions
``full-stat-dict``
In addition to the items in the stat-dict, it features additional information::
files = number of changed files as int"""
__slots__ = ("total", "files")
def __init__(self, total, files):
self.total = total
self.files = files
@classmethod
def _list_from_string(cls, repo, text):
"""Create a Stat object from output retrieved by git-diff.
:return: git.Stat"""
hsh = {'total': {'insertions': 0, 'deletions': 0, 'lines': 0, 'files': 0}, 'files': dict()}
for line in text.splitlines():
(raw_insertions, raw_deletions, filename) = line.split("\t")
insertions = raw_insertions != '-' and int(raw_insertions) or 0
deletions = raw_deletions != '-' and int(raw_deletions) or 0
hsh['total']['insertions'] += insertions
hsh['total']['deletions'] += deletions
hsh['total']['lines'] += insertions + deletions
hsh['total']['files'] += 1
hsh['files'][filename.strip()] = {'insertions': insertions,
'deletions': deletions,
'lines': insertions + deletions}
return Stats(hsh['total'], hsh['files'])
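
# Note (added for clarity): the text parsed by _list_from_string() is
# `git diff --numstat` output -- one tab-separated
# "<insertions>\t<deletions>\t<path>" record per line, with '-' in the
# number columns for binary files.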
class IndexFileSHA1Writer(object):
"""Wrapper around a file-like object that remembers the SHA1 of
the data written to it. It will write a sha when the stream is closed
    or if asked for explicitly using write_sha.
Only useful to the indexfile
:note: Based on the dulwich project"""
__slots__ = ("f", "sha1")
def __init__(self, f):
self.f = f
self.sha1 = make_sha(b"")
def write(self, data):
self.sha1.update(data)
return self.f.write(data)
def write_sha(self):
sha = self.sha1.digest()
self.f.write(sha)
return sha
def close(self):
sha = self.write_sha()
self.f.close()
return sha
def tell(self):
return self.f.tell()
class LockFile(object):
"""Provides methods to obtain, check for, and release a file based lock which
should be used to handle concurrent access to the same file.
As we are a utility class to be derived from, we only use protected methods.
Locks will automatically be released on destruction"""
__slots__ = ("_file_path", "_owns_lock")
def __init__(self, file_path):
self._file_path = file_path
self._owns_lock = False
def __del__(self):
self._release_lock()
def _lock_file_path(self):
""":return: Path to lockfile"""
return "%s.lock" % (self._file_path)
def _has_lock(self):
""":return: True if we have a lock and if the lockfile still exists
:raise AssertionError: if our lock-file does not exist"""
return self._owns_lock
def _obtain_lock_or_raise(self):
"""Create a lock file as flag for other instances, mark our instance as lock-holder
:raise IOError: if a lock was already present or a lock file could not be written"""
if self._has_lock():
return
lock_file = self._lock_file_path()
if osp.isfile(lock_file):
raise IOError("Lock for file %r did already exist, delete %r in case the lock is illegal" %
(self._file_path, lock_file))
try:
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
if is_win:
flags |= os.O_SHORT_LIVED
fd = os.open(lock_file, flags, 0)
os.close(fd)
except OSError as e:
raise IOError(str(e))
self._owns_lock = True
def _obtain_lock(self):
"""The default implementation will raise if a lock cannot be obtained.
Subclasses may override this method to provide a different implementation"""
return self._obtain_lock_or_raise()
def _release_lock(self):
"""Release our lock if we have one"""
if not self._has_lock():
return
        # if someone removed our file beforehand, let's just flag this issue
# instead of failing, to make it more usable.
lfp = self._lock_file_path()
try:
rmfile(lfp)
except OSError:
pass
self._owns_lock = False
class BlockingLockFile(LockFile):
"""The lock file will block until a lock could be obtained, or fail after
a specified timeout.
:note: If the directory containing the lock was removed, an exception will
be raised during the blocking period, preventing hangs as the lock
can never be obtained."""
__slots__ = ("_check_interval", "_max_block_time")
def __init__(self, file_path, check_interval_s=0.3, max_block_time_s=MAXSIZE):
"""Configure the instance
        :param check_interval_s:
            Period of time to sleep until the lock is checked the next time.
            By default, it waits a nearly unlimited time
        :param max_block_time_s: Maximum amount of seconds we may lock"""
super(BlockingLockFile, self).__init__(file_path)
self._check_interval = check_interval_s
self._max_block_time = max_block_time_s
def _obtain_lock(self):
"""This method blocks until it obtained the lock, or raises IOError if
it ran out of time or if the parent directory was not available anymore.
        If this method returns, you are guaranteed to own the lock"""
starttime = time.time()
maxtime = starttime + float(self._max_block_time)
while True:
try:
super(BlockingLockFile, self)._obtain_lock()
except IOError:
                # sanity check: if the directory leading to the lockfile is not
# readable anymore, raise an exception
curtime = time.time()
if not osp.isdir(osp.dirname(self._lock_file_path())):
msg = "Directory containing the lockfile %r was not readable anymore after waiting %g seconds" % (
self._lock_file_path(), curtime - starttime)
raise IOError(msg)
# END handle missing directory
if curtime >= maxtime:
msg = "Waited %g seconds for lock at %r" % (maxtime - starttime, self._lock_file_path())
raise IOError(msg)
# END abort if we wait too long
time.sleep(self._check_interval)
else:
break
# END endless loop
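
# Usage sketch (illustrative; per the docstring the class is meant to be
# derived from, so only protected methods are exposed):
#   lock = BlockingLockFile('/tmp/some.file', check_interval_s=0.1, max_block_time_s=5)
#   lock._obtain_lock()    # blocks for up to ~5 seconds, raises IOError on timeout
#   ...                    # critical section
#   lock._release_lock()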
class IterableList(list):
"""
List of iterable objects allowing to query an object by id or by named index::
heads = repo.heads
heads.master
heads['master']
heads[0]
It requires an id_attribute name to be set which will be queried from its
contained items to have a means for comparison.
A prefix can be specified which is to be used in case the id returned by the
items always contains a prefix that does not matter to the user, so it
can be left out."""
__slots__ = ('_id_attr', '_prefix')
def __new__(cls, id_attr, prefix=''):
return super(IterableList, cls).__new__(cls)
def __init__(self, id_attr, prefix=''):
self._id_attr = id_attr
self._prefix = prefix
def __contains__(self, attr):
# first try identity match for performance
rval = list.__contains__(self, attr)
if rval:
return rval
# END handle match
# otherwise make a full name search
try:
getattr(self, attr)
return True
except (AttributeError, TypeError):
return False
# END handle membership
def __getattr__(self, attr):
attr = self._prefix + attr
for item in self:
if getattr(item, self._id_attr) == attr:
return item
# END for each item
return list.__getattribute__(self, attr)
def __getitem__(self, index):
if isinstance(index, int):
return list.__getitem__(self, index)
try:
return getattr(self, index)
except AttributeError:
raise IndexError("No item found with id %r" % (self._prefix + index))
# END handle getattr
def __delitem__(self, index):
delindex = index
if not isinstance(index, int):
delindex = -1
name = self._prefix + index
for i, item in enumerate(self):
if getattr(item, self._id_attr) == name:
delindex = i
break
# END search index
# END for each item
if delindex == -1:
raise IndexError("Item with name %s not found" % name)
# END handle error
# END get index to delete
list.__delitem__(self, delindex)
class Iterable(object):
"""Defines an interface for iterable items which is to assure a uniform
way to retrieve and iterate items within the git repository"""
__slots__ = tuple()
_id_attribute_ = "attribute that most suitably identifies your instance"
@classmethod
def list_items(cls, repo, *args, **kwargs):
"""
        Find all items of this type - subclasses can specify args and kwargs differently.
        If no additional arguments are given, subclasses are obliged to return all items.

        :note: Favor the iter_items method, as it avoids materializing all items at once.
        :return: list(Item, ...) list of item instances"""
out_list = IterableList(cls._id_attribute_)
out_list.extend(cls.iter_items(repo, *args, **kwargs))
return out_list
@classmethod
def iter_items(cls, repo, *args, **kwargs):
"""For more information about the arguments, see list_items
:return: iterator yielding Items"""
raise NotImplementedError("To be implemented by Subclass")
#} END classes
class NullHandler(logging.Handler):
def emit(self, record):
pass
# In Python 2.6, there is no NullHandler yet. Let's monkey-patch it for a workaround.
if not hasattr(logging, 'NullHandler'):
logging.NullHandler = NullHandler
| [
"[email protected]"
] | |
8c3071706a8894ff8207c463c10c2281dd7b7f1c | 1139841f6451c0e9e2a53a808966139dbde60f54 | /nlp_tools/nlp_postprocessor/postprocessing_rule.py | 43a30de044312219c39aedddaf33a6d6edb89b2b | [] | no_license | abchapman93/nlp_tools | 6728d158aa3bb06e2d5e6fa58d8924315e42ce09 | 876aac6203d596569983ca9920fde888f34cc3f3 | refs/heads/master | 2022-10-23T18:05:02.267427 | 2020-06-16T16:17:43 | 2020-06-16T16:17:43 | 251,348,604 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,407 | py | class PostprocessingRule:
def __init__(self, patterns, action, name=None, description=None, action_args=None):
"""A PostprocessingRule checks conditions of a spaCy Span entity
and executes some action if all patterns are met.
patterns (list): A list of PostprocessingPatterns,
each of which check a condition of an entity.
action (function): A function to call with the entity as an argument.
            This function should take at least the following two arguments:
ent: the spacy span
i: the index of ent in doc.ents
Additional positional arguments can be provided in action_args.
name (str): Optional name of rule.
description (str): Optional description of the rule.
action_args (tuple or None): Optional tuple of positional arguments
to pass to action() if all patterns pass. Default is None,
in which case the rule will call action(ent, i).
"""
self.patterns = patterns
self.action = action
self.name = name
self.description = description
self.action_args = action_args
def __call__(self, ent, i, debug=False):
"""Iterate through all of the patterns in self.rules.
If any pattern does not pass (ie., return True), then returns False.
If they all pass, execute self.action and return True.
"""
for pattern in self.patterns:
# If this is a tuple, at least one has to pass
if isinstance(pattern, tuple):
passed = False
for subpattern in pattern:
rslt = subpattern(ent)
if rslt is True:
passed = True
break
if passed is False:
return False
# Otherwise just check a single value
else:
rslt = pattern(ent)
if rslt is False:
return False
# Every pattern passed - do the action
if debug:
print("Passed:", self, "on ent:", ent, ent.sent)
if self.action_args is None:
self.action(ent, i)
else:
self.action(ent, i, *self.action_args)
return True
def __repr__(self):
return f"PostprocessingRule: {self.name} - {self.description}" | [
"[email protected]"
] | |
f2d6974ca6d35f5e54f4a11c60c50b255be0fc3a | c67f2d0677f8870bc1d970891bbe31345ea55ce2 | /zippy/lib-python/3/multiprocessing/heap.py | 0a25ef05c7f2ce14d67f1e292b2b07881fa9e953 | [
"BSD-3-Clause"
] | permissive | securesystemslab/zippy | a5a1ecf5c688504d8d16128ce901406ffd6f32c2 | ff0e84ac99442c2c55fe1d285332cfd4e185e089 | refs/heads/master | 2022-07-05T23:45:36.330407 | 2018-07-10T22:17:32 | 2018-07-10T22:17:32 | 67,824,983 | 324 | 27 | null | null | null | null | UTF-8 | Python | false | false | 8,582 | py | #
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import bisect
import mmap
import tempfile
import os
import sys
import threading
import itertools
import _multiprocessing
from multiprocessing.util import Finalize, info
from multiprocessing.forking import assert_spawning
__all__ = ['BufferWrapper']
#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#
if sys.platform == 'win32':
from _multiprocessing import win32
class Arena(object):
_counter = itertools.count()
def __init__(self, size):
self.size = size
self.name = 'pym-%d-%d' % (os.getpid(), next(Arena._counter))
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == 0, 'tagname already in use'
self._state = (self.size, self.name)
def __getstate__(self):
assert_spawning(self)
return self._state
def __setstate__(self, state):
self.size, self.name = self._state = state
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
assert win32.GetLastError() == win32.ERROR_ALREADY_EXISTS
else:
class Arena(object):
def __init__(self, size):
self.buffer = mmap.mmap(-1, size)
self.size = size
self.name = None
#
# Class allowing allocation of chunks of memory from arenas
#
class Heap(object):
_alignment = 8
def __init__(self, size=mmap.PAGESIZE):
self._lastpid = os.getpid()
self._lock = threading.Lock()
self._size = size
self._lengths = []
self._len_to_seq = {}
self._start_to_block = {}
self._stop_to_block = {}
self._allocated_blocks = set()
self._arenas = []
# list of pending blocks to free - see free() comment below
self._pending_free_blocks = []
@staticmethod
def _roundup(n, alignment):
# alignment must be a power of 2
mask = alignment - 1
return (n + mask) & ~mask
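    # Worked example: _roundup(13, 8) -> (13 + 7) & ~7 == 16, i.e. the next
    # multiple of 8 at or above 13; the mask trick only works because the
    # alignment is a power of 2.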
def _malloc(self, size):
# returns a large enough block -- it might be much larger
i = bisect.bisect_left(self._lengths, size)
if i == len(self._lengths):
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
self._size *= 2
info('allocating a new mmap of length %d', length)
arena = Arena(length)
self._arenas.append(arena)
return (arena, 0, length)
else:
length = self._lengths[i]
seq = self._len_to_seq[length]
block = seq.pop()
if not seq:
del self._len_to_seq[length], self._lengths[i]
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
return block
def _free(self, block):
# free location and try to merge with neighbours
(arena, start, stop) = block
try:
prev_block = self._stop_to_block[(arena, start)]
except KeyError:
pass
else:
start, _ = self._absorb(prev_block)
try:
next_block = self._start_to_block[(arena, stop)]
except KeyError:
pass
else:
_, stop = self._absorb(next_block)
block = (arena, start, stop)
length = stop - start
try:
self._len_to_seq[length].append(block)
except KeyError:
self._len_to_seq[length] = [block]
bisect.insort(self._lengths, length)
self._start_to_block[(arena, start)] = block
self._stop_to_block[(arena, stop)] = block
def _absorb(self, block):
# deregister this block so it can be merged with a neighbour
(arena, start, stop) = block
del self._start_to_block[(arena, start)]
del self._stop_to_block[(arena, stop)]
length = stop - start
seq = self._len_to_seq[length]
seq.remove(block)
if not seq:
del self._len_to_seq[length]
self._lengths.remove(length)
return start, stop
def _free_pending_blocks(self):
# Free all the blocks in the pending list - called with the lock held.
while True:
try:
block = self._pending_free_blocks.pop()
except IndexError:
break
self._allocated_blocks.remove(block)
self._free(block)
def free(self, block):
# free a block returned by malloc()
# Since free() can be called asynchronously by the GC, it could happen
# that it's called while self._lock is held: in that case,
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
# trylock is used instead, and if the lock can't be acquired
# immediately, the block is added to a list of blocks to be freed
# synchronously sometimes later from malloc() or free(), by calling
# _free_pending_blocks() (appending and retrieving from a list is not
# strictly thread-safe but under cPython it's atomic thanks to the GIL).
assert os.getpid() == self._lastpid
if not self._lock.acquire(False):
# can't acquire the lock right now, add the block to the list of
# pending blocks to free
self._pending_free_blocks.append(block)
else:
# we hold the lock
try:
self._free_pending_blocks()
self._allocated_blocks.remove(block)
self._free(block)
finally:
self._lock.release()
def malloc(self, size):
# return a block of right size (possibly rounded up)
assert 0 <= size < sys.maxsize
if os.getpid() != self._lastpid:
self.__init__() # reinitialize after fork
self._lock.acquire()
self._free_pending_blocks()
try:
size = self._roundup(max(size,1), self._alignment)
(arena, start, stop) = self._malloc(size)
new_stop = start + size
if new_stop < stop:
self._free((arena, new_stop, stop))
block = (arena, start, new_stop)
self._allocated_blocks.add(block)
return block
finally:
self._lock.release()
#
# Class representing a chunk of an mmap -- can be inherited
#
class BufferWrapper(object):
_heap = Heap()
def __init__(self, size):
assert 0 <= size < sys.maxsize
block = BufferWrapper._heap.malloc(size)
self._state = (block, size)
Finalize(self, BufferWrapper._heap.free, args=(block,))
def get_address(self):
(arena, start, stop), size = self._state
address, length = _multiprocessing.address_of_buffer(arena.buffer)
assert size <= length
return address + start
def get_size(self):
return self._state[1]
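
# Minimal usage sketch (illustrative): allocate a shared 1 KiB block.
#   wrapper = BufferWrapper(1024)
#   wrapper.get_size()     # -> 1024
#   wrapper.get_address()  # absolute address of the block within its arena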
| [
"[email protected]"
] | |
411947a1984c31f8fbdb8e6f9394716a8adaf64c | 14956dbed8ae4fba1d65b9829d9405fcf43ac698 | /Cyber Security/Capture the Flag Competitions/2020/DSTA CDDC 2020/Warp Gate 4/(UNDONE) What Time Is It [1]/solve.py | 9ba0fc4d6cf73be6d252a5672169fe10759bb934 | [] | no_license | Hackin7/Programming-Crappy-Solutions | ae8bbddad92a48cf70976cec91bf66234c9b4d39 | ffa3b3c26a6a06446cc49c8ac4f35b6d30b1ee0f | refs/heads/master | 2023-03-21T01:21:00.764957 | 2022-12-28T14:22:33 | 2022-12-28T14:22:33 | 201,292,128 | 12 | 7 | null | 2023-03-05T16:05:34 | 2019-08-08T16:00:21 | Roff | UTF-8 | Python | false | false | 718 | py | datesString = '''\
2005.10.06 05:23:15
2020.10.05 22:39:46
2020.08.29 05:16:57
2020.08.12 10:05:39
2020.09.29 06:36:38
2020.09.27 00:41:56
2020.09.30 18:43:24
2020.08.10 03:54:13
2020.09.24 00:09:37
2020.09.16 09:20:23
2020.08.10 22:06:44
2020.08.10 23:19:09
2020.08.13 22:08:52
1987.04.11 00:43:13\
'''
import datetime
dates = datesString.split('\n')
for i in range(len(dates)):
print(dates[i])
dates[i] = datetime.datetime.strptime(dates[i], '%Y.%m.%d %H:%M:%S')
def avg(dates):
any_reference_date = datetime.datetime(1900, 1, 1)
return any_reference_date + sum([date - any_reference_date for date in dates], datetime.timedelta()) / len(dates)
print()
print(avg(dates))
| [
"[email protected]"
] | |
bf984644b822f43a0a3dabb801c8a47cc73ebe50 | 225bc8ac617a721ae79e8287ca0df47439740c6b | /strip_tags.py | 62fa10f4d5cf0c2ed91f72923096389502b6475f | [
"Artistic-2.0"
] | permissive | rec/strip-tags | 558228d1e24679822a6c3ec9130d9d1867657251 | 4f6568fa26089275d77c82d4c55c6a0f05bbebd1 | refs/heads/master | 2020-04-14T21:28:04.464951 | 2014-05-17T18:51:22 | 2014-05-17T18:51:22 | 19,893,626 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 330 | py | #!/usr/bin/env python
from BeautifulSoup import BeautifulSoup
def strip_all_tags(html):
return html and ''.join(BeautifulSoup(html).findAll(text=True))
def join_inputs():
results = []
while True:
try:
results.append(raw_input())
except EOFError:
return ''.join(results)
print(strip_all_tags(join_inputs()))
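
# e.g. piping '<p>Hello <b>world</b></p>' into this script prints 'Hello world'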
| [
"[email protected]"
] | |
4f9db7146b298561b8def897b45f7e3ecbd7e31a | 28f3e82c99fe3628f3d0b361f627408a2fdacfc2 | /driver/migrations/0003_auto_20190316_2209.py | 5fd2ff7c7dc5ae639e7af43cb2f363d6084df126 | [] | no_license | aballah-chamakh/Delivery_Tracker | 09957eae173f30406eb285256bfb07b119ddce22 | ffb07027e81aeb0fab90fc41963544625dd84fea | refs/heads/master | 2020-04-27T22:31:49.854344 | 2019-04-09T18:11:05 | 2019-04-09T18:11:05 | 174,740,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | # Generated by Django 2.0 on 2019-03-16 21:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('driver', '0002_driver_image'),
]
operations = [
migrations.AlterField(
model_name='driver',
name='vehicle',
field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='vehicle.Vehicle'),
),
]
| [
"[email protected]"
] | |
1cd7578ae2c49e8fc2d355e4c3815d9c7ca0f211 | 384efc85a7845caa10c1090c80243a9a29215d8a | /02_RE_Objects_Methods/05_findall.py | b7b39951a5c4adbf13b0a3637478ed0cd36154dd | [] | no_license | Do-code-ing/Python_Module_RE | 032754e8c5b9e619d83602bdff4b91747b419b21 | 01bdd0202fdc7040971f3132d4cbbbde11175bb9 | refs/heads/master | 2023-06-02T22:32:37.543180 | 2021-06-20T05:07:34 | 2021-06-20T05:07:34 | 358,269,215 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 303 | py | # Pattern.findall(string[, pos[, endpos]])
# Similar to the module-level findall() function, but uses the compiled pattern.
# Like search(), it also accepts the optional pos and endpos parameters that limit the search region.
import re
p = re.compile("a")
print(p.findall("abcabc"))
# ['a', 'a'] | [
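
# pos and endpos limit the searched region:
print(p.findall("abcabc", 1))
# ['a'] - the search starts at index 1, so only the second 'a' is found
print(p.findall("abcabc", 0, 3))
# ['a'] - only "abcabc"[0:3] is examined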
"[email protected]"
] | |
9cafa8166e85879b279933c5a3f29dae772a0331 | 57cb9fef5efac78758f5d151b959ca2216c94083 | /edx/app/certs/venvs/certs/bin/wheel | 5c46be6ad93ddedd93efd28f3d94527801ee7e4d | [] | no_license | JosiahKennedy/openedx-branded | 9751d5362088276a87b2e0edca0913568eeb1ac4 | d16a25b035b2e810b8ab2b0a2ac032b216562e26 | refs/heads/master | 2022-12-21T02:39:17.133147 | 2020-03-25T06:03:23 | 2020-03-25T06:03:23 | 249,895,218 | 0 | 1 | null | 2022-12-08T01:23:48 | 2020-03-25T05:33:05 | null | UTF-8 | Python | false | false | 233 | #!/edx/app/certs/venvs/certs/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
90ce63a11f08d9fa3007f7ad5fc753a346e978e4 | 88c5045676e32eb7ca1a40d8a4379c7c83f6faca | /tests/trinity/integration/test_plugin_discovery.py | b462334b35106f5d31d7acd6159c76f62dfeba05 | [
"MIT"
] | permissive | cd4761/ecc-trinity | 9983a4f29c9262293379ff03931e652b00f398a3 | ed27c5c8bc2b94b796e74f271594f138c2aa5b36 | refs/heads/master | 2020-06-23T21:23:50.829092 | 2019-07-29T10:45:20 | 2019-07-29T10:45:20 | 198,756,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 438 | py | from trinity.plugins.registry import (
discover_plugins
)
# This plugin is external to this code base and installed by tox
# In order to install it locally run:
# pip install -e trinity-external-plugins/examples/peer_count_reporter
from peer_count_reporter_plugin import PeerCountReporterPlugin
def test_plugin_discovery():
plugins = [type(plugin) for plugin in discover_plugins()]
assert PeerCountReporterPlugin in plugins
| [
"[email protected]"
] | |
3ebf29cec10ec32a1feaddd87586c0b85af2e132 | 90cea58e80309d2dff88f73f3a43ed5f943ff97d | /MaxSubArray.py | aa062ccb2dac884a1a93196e6e3675cdc0c47d68 | [] | no_license | SaiSujithReddy/CodePython | 0b65c82b0e71dba2bbd4c1aefec4e6cd6fd42341 | 4c05b7909092009afffa4536fd284060d20e462d | refs/heads/master | 2022-02-24T09:21:15.284745 | 2019-10-07T23:36:17 | 2019-10-07T23:36:17 | 106,611,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py |
import sys
def max_sub_array(array):
    # Kadane's algorithm: extend the running sum, record the best total seen
    # so far, and reset the running sum whenever it drops below zero.
    max_sum = -sys.maxsize - 1
    temp_sum = 0
    for x in range(len(array)):
        temp_sum += array[x]
        # update the best sum *before* any reset, so all-negative arrays
        # report their largest element instead of an incorrect 0
        if temp_sum > max_sum:
            max_sum = temp_sum
        if temp_sum < 0:
            temp_sum = 0
        print(temp_sum, max_sum)
    print(max_sum)
# array = [1,2,3,-1,-5,-5,4]
# array = [1,2,3,-1,-5,-5,4,3]
# array = [1]
# array = [0]
# array = [-1]
# array = [-1,-1,3,0,-100,101]
# array = [1000,1,2,3,4,-2]
array = [-100,-99,-9800,1000,-2,2000,-90,-9000]
max_sub_array(array) | [
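# With the active test array the best subarray is [1000, -2, 2000], so the
# expected output is 2998.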
"[email protected]"
] | |
f9d3e18d03b7ad0e4bff8ac8ad3eda798bb6f2e8 | 5d6365f4cc81272f8c481ee31f1111e8eca6dca5 | /alipay/aop/api/domain/AlipayCommerceOperationPromoterRankingQueryModel.py | f9cf8ced33270bccfe1a08deb642d7a7987eeba1 | [
"Apache-2.0"
] | permissive | barrybbb/alipay-sdk-python-all | 9e99b56138e6ca9c0b236707c79899d396ac6f88 | 1b63620431d982d30d39ee0adc4b92463cbcee3c | refs/heads/master | 2023-08-22T20:16:17.242701 | 2021-10-11T08:22:44 | 2021-10-11T08:22:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,711 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceOperationPromoterRankingQueryModel(object):
def __init__(self):
self._pid = None
self._type = None
self._user_id = None
@property
def pid(self):
return self._pid
@pid.setter
def pid(self, value):
self._pid = value
@property
def type(self):
return self._type
@type.setter
def type(self, value):
self._type = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.pid:
if hasattr(self.pid, 'to_alipay_dict'):
params['pid'] = self.pid.to_alipay_dict()
else:
params['pid'] = self.pid
if self.type:
if hasattr(self.type, 'to_alipay_dict'):
params['type'] = self.type.to_alipay_dict()
else:
params['type'] = self.type
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceOperationPromoterRankingQueryModel()
if 'pid' in d:
o.pid = d['pid']
if 'type' in d:
o.type = d['type']
if 'user_id' in d:
o.user_id = d['user_id']
return o
| [
"[email protected]"
] | |
a40f3a2fe76ca92d94c31c07ec545abcd156d9e2 | bd696223aaf5404987df11832b4c17c916b9690f | /nlp_sample/fugashi_generic_tagger_sample/main.py | 575cf1bca8948151845f4b8df85265ae91a5b162 | [] | no_license | wararaki718/scrapbox3 | 000a285477f25c1e8a4b6017b6ad06c76f173342 | 9be5dc879a33a1988d9f6611307c499eec125dc2 | refs/heads/master | 2023-06-16T08:46:32.879231 | 2021-07-17T14:12:54 | 2021-07-17T14:12:54 | 280,590,887 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from fugashi import Tagger, GenericTagger
def main():
text = 'softbank'
tagger = Tagger()
gtagger = GenericTagger()
print('Tagger:')
print(tagger.parse(text))
for word in tagger(text):
print(word.surface)
print(word.feature)
print()
print('GenericTagger:')
print(gtagger.parse(text))
for word in gtagger(text):
print(word.surface)
print(word.feature)
print()
print('DONE')
if __name__ == '__main__':
main()
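
# Note (sketch of the difference, as I understand fugashi): Tagger assumes a
# UniDic dictionary and exposes named fields on word.feature (e.g.
# word.feature.pos1), while GenericTagger makes no schema assumptions and
# returns word.feature as a plain tuple of CSV fields.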
| [
"[email protected]"
] | |
8fee0875e9f03699c64938e05824c350f5061964 | b9c7c3433675278dcbd6e52056a299ccd2a2a122 | /sword/match.py | e41aa854102ee47f79dadfacd3c8871d0e16ec4b | [] | no_license | smileshy777/practice | 3d6b8412138c94e75810298bc2dcde52d374826b | a0bc7d7fb9fe2db958c3ee2671df927ce136ecff | refs/heads/master | 2020-03-31T15:44:45.868628 | 2018-11-29T05:08:02 | 2018-11-29T05:08:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py |
class Solution:
# s, pattern都是字符串
def match(self, s, pattern):
len_s = len(s)
len_p = len(pattern)
if len_s == 0 and len_p == 0:
return True
if len_s > 0 and len_p == 0:
return False
if len_p > 1 and pattern[1] == '*':
if len_s > 0 and (s[0] == pattern[0] or pattern[0] == '.'):
return self.match(s, pattern[2:]) or self.match(s[1:], pattern[2:]) or self.match(s[1:], pattern)
else:
return self.match(s, pattern[2:])
if len_s > 0 and (s[0] == pattern[0] or pattern[0] == '.'):
return self.match(s[1:], pattern[1:])
return False | [
"[email protected]"
] | |
58bf681835fc8bcacc6ed3c4562095a2cf0b809d | d1c67f2031d657902acef4411877d75b992eab91 | /swagger_client/models/xmpp_integration.py | 649d87606c8f04247adedf1b96b0924b65914aa9 | [] | no_license | Certn/opsgenie-python | c6e6a7f42394499e5224d679cc9a449042fcf9c3 | bd5f402f97d591e4082b38c938cbabca4cf29787 | refs/heads/master | 2023-01-01T10:45:13.132455 | 2020-10-27T17:40:01 | 2020-10-27T17:40:01 | 307,769,432 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28,507 | py | # coding: utf-8
"""
Opsgenie REST API
Opsgenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class XmppIntegration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'suppress_notifications': 'bool',
'ignore_teams_from_payload': 'bool',
'ignore_recipients_from_payload': 'bool',
'recipients': 'list[Recipient]',
'is_advanced': 'bool',
'ignore_responders_from_payload': 'bool',
'ignore_tags_from_payload': 'bool',
'ignore_extra_properties_from_payload': 'bool',
'responders': 'list[Recipient]',
'priority': 'str',
'custom_priority': 'str',
'tags': 'list[str]',
'extra_properties': 'dict(str, str)',
'assigned_team': 'TeamMeta',
'feature_type': 'str',
'allow_configuration_access': 'bool',
'allow_read_access': 'bool',
'allow_write_access': 'bool',
'allow_delete_access': 'bool',
'alert_filter': 'AlertFilter',
'alert_actions': 'list[str]',
'callback_type': 'str',
'send_alert_actions': 'bool',
'bidirectional_callback_type': 'str',
'send_via_marid': 'bool',
'send_via_oec': 'bool'
}
attribute_map = {
'suppress_notifications': 'suppressNotifications',
'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
'recipients': 'recipients',
'is_advanced': 'isAdvanced',
'ignore_responders_from_payload': 'ignoreRespondersFromPayload',
'ignore_tags_from_payload': 'ignoreTagsFromPayload',
'ignore_extra_properties_from_payload': 'ignoreExtraPropertiesFromPayload',
'responders': 'responders',
'priority': 'priority',
'custom_priority': 'customPriority',
'tags': 'tags',
'extra_properties': 'extraProperties',
'assigned_team': 'assignedTeam',
'feature_type': 'feature-type',
'allow_configuration_access': 'allowConfigurationAccess',
'allow_read_access': 'allowReadAccess',
'allow_write_access': 'allowWriteAccess',
'allow_delete_access': 'allowDeleteAccess',
'alert_filter': 'alertFilter',
'alert_actions': 'alertActions',
'callback_type': 'callback-type',
'send_alert_actions': 'sendAlertActions',
'bidirectional_callback_type': 'bidirectional-callback-type',
'send_via_marid': 'sendViaMarid',
'send_via_oec': 'sendViaOEC'
}
def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, ignore_responders_from_payload=None, ignore_tags_from_payload=None, ignore_extra_properties_from_payload=None, responders=None, priority=None, custom_priority=None, tags=None, extra_properties=None, assigned_team=None, feature_type=None, allow_configuration_access=None, allow_read_access=None, allow_write_access=None, allow_delete_access=None, alert_filter=None, alert_actions=None, callback_type=None, send_alert_actions=None, bidirectional_callback_type=None, send_via_marid=None, send_via_oec=None): # noqa: E501
"""XmppIntegration - a model defined in Swagger""" # noqa: E501
self._suppress_notifications = None
self._ignore_teams_from_payload = None
self._ignore_recipients_from_payload = None
self._recipients = None
self._is_advanced = None
self._ignore_responders_from_payload = None
self._ignore_tags_from_payload = None
self._ignore_extra_properties_from_payload = None
self._responders = None
self._priority = None
self._custom_priority = None
self._tags = None
self._extra_properties = None
self._assigned_team = None
self._feature_type = None
self._allow_configuration_access = None
self._allow_read_access = None
self._allow_write_access = None
self._allow_delete_access = None
self._alert_filter = None
self._alert_actions = None
self._callback_type = None
self._send_alert_actions = None
self._bidirectional_callback_type = None
self._send_via_marid = None
self._send_via_oec = None
self.discriminator = None
if suppress_notifications is not None:
self.suppress_notifications = suppress_notifications
if ignore_teams_from_payload is not None:
self.ignore_teams_from_payload = ignore_teams_from_payload
if ignore_recipients_from_payload is not None:
self.ignore_recipients_from_payload = ignore_recipients_from_payload
if recipients is not None:
self.recipients = recipients
if is_advanced is not None:
self.is_advanced = is_advanced
if ignore_responders_from_payload is not None:
self.ignore_responders_from_payload = ignore_responders_from_payload
if ignore_tags_from_payload is not None:
self.ignore_tags_from_payload = ignore_tags_from_payload
if ignore_extra_properties_from_payload is not None:
self.ignore_extra_properties_from_payload = ignore_extra_properties_from_payload
if responders is not None:
self.responders = responders
if priority is not None:
self.priority = priority
if custom_priority is not None:
self.custom_priority = custom_priority
if tags is not None:
self.tags = tags
if extra_properties is not None:
self.extra_properties = extra_properties
if assigned_team is not None:
self.assigned_team = assigned_team
if feature_type is not None:
self.feature_type = feature_type
if allow_configuration_access is not None:
self.allow_configuration_access = allow_configuration_access
if allow_read_access is not None:
self.allow_read_access = allow_read_access
if allow_write_access is not None:
self.allow_write_access = allow_write_access
if allow_delete_access is not None:
self.allow_delete_access = allow_delete_access
if alert_filter is not None:
self.alert_filter = alert_filter
if alert_actions is not None:
self.alert_actions = alert_actions
if callback_type is not None:
self.callback_type = callback_type
if send_alert_actions is not None:
self.send_alert_actions = send_alert_actions
if bidirectional_callback_type is not None:
self.bidirectional_callback_type = bidirectional_callback_type
if send_via_marid is not None:
self.send_via_marid = send_via_marid
if send_via_oec is not None:
self.send_via_oec = send_via_oec
@property
def suppress_notifications(self):
"""Gets the suppress_notifications of this XmppIntegration. # noqa: E501
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:return: The suppress_notifications of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._suppress_notifications
@suppress_notifications.setter
def suppress_notifications(self, suppress_notifications):
"""Sets the suppress_notifications of this XmppIntegration.
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:param suppress_notifications: The suppress_notifications of this XmppIntegration. # noqa: E501
:type: bool
"""
self._suppress_notifications = suppress_notifications
@property
def ignore_teams_from_payload(self):
"""Gets the ignore_teams_from_payload of this XmppIntegration. # noqa: E501
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_teams_from_payload of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_teams_from_payload
@ignore_teams_from_payload.setter
def ignore_teams_from_payload(self, ignore_teams_from_payload):
"""Sets the ignore_teams_from_payload of this XmppIntegration.
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:param ignore_teams_from_payload: The ignore_teams_from_payload of this XmppIntegration. # noqa: E501
:type: bool
"""
self._ignore_teams_from_payload = ignore_teams_from_payload
@property
def ignore_recipients_from_payload(self):
"""Gets the ignore_recipients_from_payload of this XmppIntegration. # noqa: E501
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_recipients_from_payload of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_recipients_from_payload
@ignore_recipients_from_payload.setter
def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
"""Sets the ignore_recipients_from_payload of this XmppIntegration.
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:param ignore_recipients_from_payload: The ignore_recipients_from_payload of this XmppIntegration. # noqa: E501
:type: bool
"""
self._ignore_recipients_from_payload = ignore_recipients_from_payload
@property
def recipients(self):
"""Gets the recipients of this XmppIntegration. # noqa: E501
        Optional user, schedule, team or escalation names used to determine which users will receive the alert's notifications. Recipients exceeding the limit are ignored  # noqa: E501
:return: The recipients of this XmppIntegration. # noqa: E501
:rtype: list[Recipient]
"""
return self._recipients
@recipients.setter
def recipients(self, recipients):
"""Sets the recipients of this XmppIntegration.
        Optional user, schedule, team or escalation names used to determine which users will receive the alert's notifications. Recipients exceeding the limit are ignored  # noqa: E501
:param recipients: The recipients of this XmppIntegration. # noqa: E501
:type: list[Recipient]
"""
self._recipients = recipients
@property
def is_advanced(self):
"""Gets the is_advanced of this XmppIntegration. # noqa: E501
:return: The is_advanced of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._is_advanced
@is_advanced.setter
def is_advanced(self, is_advanced):
"""Sets the is_advanced of this XmppIntegration.
:param is_advanced: The is_advanced of this XmppIntegration. # noqa: E501
:type: bool
"""
self._is_advanced = is_advanced
@property
def ignore_responders_from_payload(self):
"""Gets the ignore_responders_from_payload of this XmppIntegration. # noqa: E501
:return: The ignore_responders_from_payload of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_responders_from_payload
@ignore_responders_from_payload.setter
def ignore_responders_from_payload(self, ignore_responders_from_payload):
"""Sets the ignore_responders_from_payload of this XmppIntegration.
:param ignore_responders_from_payload: The ignore_responders_from_payload of this XmppIntegration. # noqa: E501
:type: bool
"""
self._ignore_responders_from_payload = ignore_responders_from_payload
@property
def ignore_tags_from_payload(self):
"""Gets the ignore_tags_from_payload of this XmppIntegration. # noqa: E501
:return: The ignore_tags_from_payload of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_tags_from_payload
@ignore_tags_from_payload.setter
def ignore_tags_from_payload(self, ignore_tags_from_payload):
"""Sets the ignore_tags_from_payload of this XmppIntegration.
:param ignore_tags_from_payload: The ignore_tags_from_payload of this XmppIntegration. # noqa: E501
:type: bool
"""
self._ignore_tags_from_payload = ignore_tags_from_payload
@property
def ignore_extra_properties_from_payload(self):
"""Gets the ignore_extra_properties_from_payload of this XmppIntegration. # noqa: E501
:return: The ignore_extra_properties_from_payload of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._ignore_extra_properties_from_payload
@ignore_extra_properties_from_payload.setter
def ignore_extra_properties_from_payload(self, ignore_extra_properties_from_payload):
"""Sets the ignore_extra_properties_from_payload of this XmppIntegration.
:param ignore_extra_properties_from_payload: The ignore_extra_properties_from_payload of this XmppIntegration. # noqa: E501
:type: bool
"""
self._ignore_extra_properties_from_payload = ignore_extra_properties_from_payload
@property
def responders(self):
"""Gets the responders of this XmppIntegration. # noqa: E501
:return: The responders of this XmppIntegration. # noqa: E501
:rtype: list[Recipient]
"""
return self._responders
@responders.setter
def responders(self, responders):
"""Sets the responders of this XmppIntegration.
:param responders: The responders of this XmppIntegration. # noqa: E501
:type: list[Recipient]
"""
self._responders = responders
@property
def priority(self):
"""Gets the priority of this XmppIntegration. # noqa: E501
:return: The priority of this XmppIntegration. # noqa: E501
:rtype: str
"""
return self._priority
@priority.setter
def priority(self, priority):
"""Sets the priority of this XmppIntegration.
:param priority: The priority of this XmppIntegration. # noqa: E501
:type: str
"""
self._priority = priority
@property
def custom_priority(self):
"""Gets the custom_priority of this XmppIntegration. # noqa: E501
:return: The custom_priority of this XmppIntegration. # noqa: E501
:rtype: str
"""
return self._custom_priority
@custom_priority.setter
def custom_priority(self, custom_priority):
"""Sets the custom_priority of this XmppIntegration.
:param custom_priority: The custom_priority of this XmppIntegration. # noqa: E501
:type: str
"""
self._custom_priority = custom_priority
@property
def tags(self):
"""Gets the tags of this XmppIntegration. # noqa: E501
:return: The tags of this XmppIntegration. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this XmppIntegration.
:param tags: The tags of this XmppIntegration. # noqa: E501
:type: list[str]
"""
self._tags = tags
@property
def extra_properties(self):
"""Gets the extra_properties of this XmppIntegration. # noqa: E501
:return: The extra_properties of this XmppIntegration. # noqa: E501
:rtype: dict(str, str)
"""
return self._extra_properties
@extra_properties.setter
def extra_properties(self, extra_properties):
"""Sets the extra_properties of this XmppIntegration.
:param extra_properties: The extra_properties of this XmppIntegration. # noqa: E501
:type: dict(str, str)
"""
self._extra_properties = extra_properties
@property
def assigned_team(self):
"""Gets the assigned_team of this XmppIntegration. # noqa: E501
:return: The assigned_team of this XmppIntegration. # noqa: E501
:rtype: TeamMeta
"""
return self._assigned_team
@assigned_team.setter
def assigned_team(self, assigned_team):
"""Sets the assigned_team of this XmppIntegration.
:param assigned_team: The assigned_team of this XmppIntegration. # noqa: E501
:type: TeamMeta
"""
self._assigned_team = assigned_team
@property
def feature_type(self):
"""Gets the feature_type of this XmppIntegration. # noqa: E501
:return: The feature_type of this XmppIntegration. # noqa: E501
:rtype: str
"""
return self._feature_type
@feature_type.setter
def feature_type(self, feature_type):
"""Sets the feature_type of this XmppIntegration.
:param feature_type: The feature_type of this XmppIntegration. # noqa: E501
:type: str
"""
allowed_values = ["email-based", "token-based"] # noqa: E501
if feature_type not in allowed_values:
raise ValueError(
"Invalid value for `feature_type` ({0}), must be one of {1}" # noqa: E501
.format(feature_type, allowed_values)
)
self._feature_type = feature_type
@property
def allow_configuration_access(self):
"""Gets the allow_configuration_access of this XmppIntegration. # noqa: E501
        This parameter allows or restricts configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false  # noqa: E501
:return: The allow_configuration_access of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_configuration_access
@allow_configuration_access.setter
def allow_configuration_access(self, allow_configuration_access):
"""Sets the allow_configuration_access of this XmppIntegration.
        This parameter allows or restricts configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false  # noqa: E501
:param allow_configuration_access: The allow_configuration_access of this XmppIntegration. # noqa: E501
:type: bool
"""
self._allow_configuration_access = allow_configuration_access
@property
def allow_read_access(self):
"""Gets the allow_read_access of this XmppIntegration. # noqa: E501
:return: The allow_read_access of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_read_access
@allow_read_access.setter
def allow_read_access(self, allow_read_access):
"""Sets the allow_read_access of this XmppIntegration.
:param allow_read_access: The allow_read_access of this XmppIntegration. # noqa: E501
:type: bool
"""
self._allow_read_access = allow_read_access
@property
def allow_write_access(self):
"""Gets the allow_write_access of this XmppIntegration. # noqa: E501
        This parameter configures read-only access to the integration. If the integration is limited to read-only access, it will not be authorized to perform any create, update or delete action within any domain. Defaults to true  # noqa: E501
:return: The allow_write_access of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_write_access
@allow_write_access.setter
def allow_write_access(self, allow_write_access):
"""Sets the allow_write_access of this XmppIntegration.
        This parameter configures read-only access to the integration. If the integration is limited to read-only access, it will not be authorized to perform any create, update or delete action within any domain. Defaults to true  # noqa: E501
:param allow_write_access: The allow_write_access of this XmppIntegration. # noqa: E501
:type: bool
"""
self._allow_write_access = allow_write_access
@property
def allow_delete_access(self):
"""Gets the allow_delete_access of this XmppIntegration. # noqa: E501
:return: The allow_delete_access of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._allow_delete_access
@allow_delete_access.setter
def allow_delete_access(self, allow_delete_access):
"""Sets the allow_delete_access of this XmppIntegration.
:param allow_delete_access: The allow_delete_access of this XmppIntegration. # noqa: E501
:type: bool
"""
self._allow_delete_access = allow_delete_access
@property
def alert_filter(self):
"""Gets the alert_filter of this XmppIntegration. # noqa: E501
:return: The alert_filter of this XmppIntegration. # noqa: E501
:rtype: AlertFilter
"""
return self._alert_filter
@alert_filter.setter
def alert_filter(self, alert_filter):
"""Sets the alert_filter of this XmppIntegration.
:param alert_filter: The alert_filter of this XmppIntegration. # noqa: E501
:type: AlertFilter
"""
self._alert_filter = alert_filter
@property
def alert_actions(self):
"""Gets the alert_actions of this XmppIntegration. # noqa: E501
:return: The alert_actions of this XmppIntegration. # noqa: E501
:rtype: list[str]
"""
return self._alert_actions
@alert_actions.setter
def alert_actions(self, alert_actions):
"""Sets the alert_actions of this XmppIntegration.
:param alert_actions: The alert_actions of this XmppIntegration. # noqa: E501
:type: list[str]
"""
self._alert_actions = alert_actions
@property
def callback_type(self):
"""Gets the callback_type of this XmppIntegration. # noqa: E501
:return: The callback_type of this XmppIntegration. # noqa: E501
:rtype: str
"""
return self._callback_type
@callback_type.setter
def callback_type(self, callback_type):
"""Sets the callback_type of this XmppIntegration.
:param callback_type: The callback_type of this XmppIntegration. # noqa: E501
:type: str
"""
allowed_values = ["bidirectional-callback", "campfire-callback", "flowdock-callback", "flowdock-v2-callback", "planio-callback"] # noqa: E501
if callback_type not in allowed_values:
raise ValueError(
"Invalid value for `callback_type` ({0}), must be one of {1}" # noqa: E501
.format(callback_type, allowed_values)
)
self._callback_type = callback_type
@property
def send_alert_actions(self):
"""Gets the send_alert_actions of this XmppIntegration. # noqa: E501
:return: The send_alert_actions of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._send_alert_actions
@send_alert_actions.setter
def send_alert_actions(self, send_alert_actions):
"""Sets the send_alert_actions of this XmppIntegration.
:param send_alert_actions: The send_alert_actions of this XmppIntegration. # noqa: E501
:type: bool
"""
self._send_alert_actions = send_alert_actions
@property
def bidirectional_callback_type(self):
"""Gets the bidirectional_callback_type of this XmppIntegration. # noqa: E501
:return: The bidirectional_callback_type of this XmppIntegration. # noqa: E501
:rtype: str
"""
return self._bidirectional_callback_type
@bidirectional_callback_type.setter
def bidirectional_callback_type(self, bidirectional_callback_type):
"""Sets the bidirectional_callback_type of this XmppIntegration.
:param bidirectional_callback_type: The bidirectional_callback_type of this XmppIntegration. # noqa: E501
:type: str
"""
allowed_values = ["connect-wise-callback", "desk-callback", "es-watcher-callback", "hip-chat-add-on-callback", "hip-chat-callback-v2", "icinga2-callback", "icinga-callback", "marid-callback", "mattermost-callback", "nagios-based-v1-callback", "nagios-based-v2-callback", "nagios-xiv1-callback", "nagios-xiv2-callback", "slack-app-callback", "slack-callback", "solarwinds-callback", "solar-winds-web-help-desk-callback", "stackdriver-callback", "status-io-callback", "track-it-callback", "xmpp-callback", "zabbix-callback", "zenoss-callback"] # noqa: E501
if bidirectional_callback_type not in allowed_values:
raise ValueError(
"Invalid value for `bidirectional_callback_type` ({0}), must be one of {1}" # noqa: E501
.format(bidirectional_callback_type, allowed_values)
)
self._bidirectional_callback_type = bidirectional_callback_type
@property
def send_via_marid(self):
"""Gets the send_via_marid of this XmppIntegration. # noqa: E501
:return: The send_via_marid of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._send_via_marid
@send_via_marid.setter
def send_via_marid(self, send_via_marid):
"""Sets the send_via_marid of this XmppIntegration.
:param send_via_marid: The send_via_marid of this XmppIntegration. # noqa: E501
:type: bool
"""
self._send_via_marid = send_via_marid
@property
def send_via_oec(self):
"""Gets the send_via_oec of this XmppIntegration. # noqa: E501
:return: The send_via_oec of this XmppIntegration. # noqa: E501
:rtype: bool
"""
return self._send_via_oec
@send_via_oec.setter
def send_via_oec(self, send_via_oec):
"""Sets the send_via_oec of this XmppIntegration.
:param send_via_oec: The send_via_oec of this XmppIntegration. # noqa: E501
:type: bool
"""
self._send_via_oec = send_via_oec
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(XmppIntegration, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XmppIntegration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
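

# Usage sketch (illustrative only; the generated __init__ signature is not
# visible in this excerpt, so no-argument construction is an assumption and
# attributes are set through the generated setters shown above).
if __name__ == '__main__':
    integration = XmppIntegration()
    integration.suppress_notifications = True
    integration.feature_type = 'token-based'  # validated against allowed_values
    integration.tags = ['prod', 'xmpp']
    print(integration.to_dict())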
| [
"[email protected]"
] | |
6b341716384a6651d588424bc560a8f0568282bc | fc089cc607c2ae12108c74541605c5de3ff6b162 | /virtual/bin/gunicorn | 1e2bb101cb1b94145b1ae2942f40a455337f8173 | [
"MIT"
] | permissive | sirrotich/Awwards | bb5b77a1fe160ca3604eacace3522b17c4c4dfa5 | 121071b11fe70c7e9f74a52038344219f313f1e1 | refs/heads/master | 2022-12-11T08:05:39.801685 | 2019-05-28T15:07:57 | 2019-05-28T15:07:57 | 188,370,445 | 0 | 0 | null | 2022-12-08T05:10:33 | 2019-05-24T07:04:01 | Python | UTF-8 | Python | false | false | 256 | #!/home/kipngeno/Documents/awwwards/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from gunicorn.app.wsgiapp import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
790b229755e6c18385e27601143ce131475c697b | a1cd1135cd7bc3255e29632fe6c025cffd231285 | /rpc/retrying.py | 34ab31def9d6707643ec0c70eb3fa9ad7a504739 | [] | no_license | liguopeng80/gcommon.py27 | 5f8d3ac9fe85c7134cfbb557ec06a61184b58fd1 | 900cd0717c7a9db90793752fd5cbf9a576286497 | refs/heads/master | 2023-08-11T16:01:16.566945 | 2021-10-10T07:08:54 | 2021-10-10T07:08:54 | 404,542,040 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,579 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# created: 2015-05-19
"""对失败的操作进行重试
TODO: 为 RPC 调用增加超时检测 - 当 rabbitmq 或者 rpc server 出现问题时,客户端不能长久等待。
"""
import time
import logging
from twisted.internet.defer import inlineCallbacks, maybeDeferred, returnValue
from gcommon.rpc import RpcServerException
from gcommon.utils.counters import Sequence
from gcommon.utils.timer import AsyncTimer
logger = logging.getLogger('rpc')
def patch_thrift_client(retrying, client):
"""为 thrift client 的类成员函数(RPC 接口函数)增加 retry 功能。
client -> Thrift Client class (not object).
"""
client_interface = client.__implemented__.declared[0]
for name in client_interface._InterfaceClass_attrs:
value = getattr(client, name, None)
if callable(value):
new_value = retrying.rpc_retry(value)
setattr(client, name, new_value)
class TwistedRetrying(object):
"""重试异步操作"""
RETRY_INTERVAL = 1.1
STOP_MAX_RETRY_TIMES = 4
STOP_MAX_DELAY = 10
_sequence = Sequence()
def __init__(self, identifier='', retry_interval=0, max_retry_times=0, max_delay=0):
self.retry_interval = retry_interval or self.RETRY_INTERVAL
self.max_retry_times = max_retry_times or self.STOP_MAX_RETRY_TIMES
self.max_delay = max_delay or self.STOP_MAX_DELAY
self._id = self._sequence.next_value()
if identifier:
self._id = "%06d.%s" % (self._id, identifier)
else:
self._id = "%06d" % self._id
def rpc_retry(self, func):
"""Decorator"""
@inlineCallbacks
def __wrap(client_obj, *args, **kwargs):
result = yield self.call(client_obj, func, *args, **kwargs)
returnValue(result)
__wrap.__name__ = func.__name__
return __wrap
@inlineCallbacks
def call(self, client_obj, func, *args, **kwargs):
"""带有重试功能的 RPC 调用
client_obj -> RPC client 实例
func -> 未绑定的 RPC client 成员函数
"""
member_func = func.__get__(client_obj)
result = yield self.call_member_func(member_func, *args, **kwargs)
returnValue(result)
@inlineCallbacks
def call_member_func(self, func, *args, **kwargs):
"""带有重试功能的 RPC 调用"""
start_time = int(round(time.time() * 1000))
attempt_number = 0
while True:
attempt_number += 1
try:
logger.debug('[%s] try rpc request: %s, %s, args: %s', attempt_number, self._id, func.__name__, args)
result = yield maybeDeferred(func, *args, **kwargs)
except RpcServerException, e:
logger.warn('[%s] server error on rpc request: %s, error: %s', self._id, func.__name__, e)
yield AsyncTimer.start(self.retry_interval)
                # check whether another retry attempt is still allowed
now_time = int(round(time.time() * 1000))
if (now_time - start_time > self.max_delay) or (attempt_number > self.max_retry_times):
raise
else:
continue
except Exception, e:
logger.warn('[%s] unexpected error on rpc request: %s, error: %s', self._id, func.__name__, e)
raise
else:
logger.debug('[%s] rpc request finished with result: %s', self._id, func.__name__, result)
returnValue(result)
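

# Illustrative demo of the retry loop (assumptions: RpcServerException accepts
# a plain message argument and a Twisted reactor is available; flaky_rpc stands
# in for a real RPC method).
if __name__ == '__main__':
    from twisted.internet import reactor

    state = {'calls': 0}

    def flaky_rpc():
        # Fails twice with a retryable server error, then succeeds.
        state['calls'] += 1
        if state['calls'] < 3:
            raise RpcServerException('transient failure')
        return 'ok after %d calls' % state['calls']

    retrying = TwistedRetrying(identifier='demo', retry_interval=0.2)
    d = retrying.call_member_func(flaky_rpc)
    d.addCallback(lambda r: logger.info('demo result: %s', r))
    d.addBoth(lambda _: reactor.stop())
    reactor.run()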
| [
"[email protected]"
] | |
fe3d52f03d805c065f4d5d608a4a3edca9d48773 | 739e19aea52a747a818ccaa1e941f11328ca9783 | /PatternRecognitionPractice/opencv-python/opencv_test0.py | c32ce4c4f68f1071569db1fa4e71bb065257ddeb | [] | no_license | MoCuishle28/python-practice | d12edb4866361f55354da53875475f05c209254c | cc557fcdd3fec2cb67efeb1f875b4d7d9d85b5b4 | refs/heads/master | 2020-03-28T03:52:36.060049 | 2019-01-19T11:53:15 | 2019-01-19T11:53:15 | 147,677,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,817 | py | import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
SZ=20
bin_n = 16 # Number of bins
affine_flags = cv.WARP_INVERSE_MAP|cv.INTER_LINEAR
# Deskew the image using its second-order image moments
def deskew(img):
m = cv.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
    # affine warp; args: input image, transform matrix, output size
img = cv.warpAffine(img, M, (SZ, SZ), flags=affine_flags)
return img
def hog(img):
    '''Extract edge information with Sobel filters, then build an orientation histogram of it (HOG).'''
# 计算图像的 X 方向和 Y 方向的 Sobel 导数(梯度滤波器)
gx = cv.Sobel(img, cv.CV_32F, 1, 0)
gy = cv.Sobel(img, cv.CV_32F, 0, 1)
    mag, ang = cv.cartToPolar(gx, gy)  # Cartesian gradients to polar -> magnitude, angle
bins = np.int32(bin_n*ang/(2*np.pi))
bin_cells = bins[:10,:10], bins[10:,:10], bins[:10,10:], bins[10:,10:]
mag_cells = mag[:10,:10], mag[10:,:10], mag[:10,10:], mag[10:,10:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
    hist = np.hstack(hists)  # hist is a 64-dimensional vector
return hist
img = cv.imread('digits.png',0)
if img is None:
raise Exception("we need the digits.png image from samples/data here !")
cells = [np.hsplit(row,100) for row in np.vsplit(img,50)]  # split into 50 rows, then each row into 100 digit cells
# split into training and test halves
train_cells = [ i[:50] for i in cells ]
test_cells = [ i[50:] for i in cells]
deskewed = [list(map(deskew, row)) for row in train_cells]  # deskew every training sample
hogdata = [list(map(hog, row)) for row in deskewed]  # extract HOG features
trainData = np.float32(hogdata).reshape(-1,64)
# inspect the feature vector
print(trainData[0].shape)
print(trainData[0])
pca = PCA(n_components=2, svd_solver='arpack')
test_pca = trainData
pca_mat = pca.fit_transform(test_pca)
np.savetxt("trainMat.txt", pca_mat)
pca_3d = PCA(n_components=3, svd_solver='arpack')
test_pca_3d = trainData
pca_mat = pca_3d.fit_transform(test_pca_3d)
np.savetxt("trainMat_3D.txt", pca_mat)
responses = np.repeat(np.arange(10),250)[:,np.newaxis]
np.savetxt("labels.txt", responses)
svm = cv.ml.SVM_create()
svm.setKernel(cv.ml.SVM_LINEAR)
svm.setType(cv.ml.SVM_C_SVC)
svm.setC(2.67)
svm.setGamma(5.383)
svm.train(trainData, cv.ml.ROW_SAMPLE, responses)
svm.save('svm_data.dat')
# preprocess the test set
deskewed = [list(map(deskew,row)) for row in test_cells]
hogdata = [list(map(hog,row)) for row in deskewed]
testData = np.float32(hogdata).reshape(-1,bin_n*4)
result = svm.predict(testData)[1]
mask = result==responses
correct = np.count_nonzero(mask)
print(correct*100.0/result.size) | [
"[email protected]"
] | |
e2f402bfc9e62ee0c6c90852bd8a66c383ce4be4 | c09e0d3dd9105e131b5c9cc0c2076e7103263d9f | /bigiq/tests/unit/mock/procenv.py | 5359764b2fe65685847374c18a884ad5ba39a9e9 | [] | no_license | gundalow-collections/f5 | 693166aa8f270df37a763084d45d7f318b1c63e4 | cdd14055c1615225e0050b6e7b47c38513bcd4c6 | refs/heads/master | 2020-07-24T05:51:08.722791 | 2019-09-16T20:11:07 | 2019-09-16T20:11:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,728 | py | # (c) 2016, Matt Davis <[email protected]>
# (c) 2016, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import json
from contextlib import contextmanager
from io import BytesIO, StringIO
from ansible_collections.f5.bigiq.tests.unit.compat import unittest
from ansible.module_utils.six import PY3
from ansible.module_utils._text import to_bytes
@contextmanager
def swap_stdin_and_argv(stdin_data='', argv_data=tuple()):
"""
context manager that temporarily masks the test runner's values for stdin and argv
"""
real_stdin = sys.stdin
real_argv = sys.argv
if PY3:
fake_stream = StringIO(stdin_data)
fake_stream.buffer = BytesIO(to_bytes(stdin_data))
else:
fake_stream = BytesIO(to_bytes(stdin_data))
try:
sys.stdin = fake_stream
sys.argv = argv_data
yield
finally:
sys.stdin = real_stdin
sys.argv = real_argv
@contextmanager
def swap_stdout():
"""
context manager that temporarily replaces stdout for tests that need to verify output
"""
old_stdout = sys.stdout
if PY3:
fake_stream = StringIO()
else:
fake_stream = BytesIO()
try:
sys.stdout = fake_stream
yield fake_stream
finally:
sys.stdout = old_stdout
class ModuleTestCase(unittest.TestCase):
def setUp(self, module_args=None):
if module_args is None:
module_args = {'_ansible_remote_tmp': '/tmp', '_ansible_keep_remote_files': False}
args = json.dumps(dict(ANSIBLE_MODULE_ARGS=module_args))
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap.__enter__()
def tearDown(self):
# unittest doesn't have a clean place to use a context manager, so we have to enter/exit manually
self.stdin_swap.__exit__(None, None, None)
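

# Illustrative example (an assumption about intended use, not part of the
# original module): ModuleTestCase fakes stdin/argv for module code, while
# swap_stdout captures anything printed by the code under test.
class _ExampleUsage(ModuleTestCase):
    def test_captures_stdout(self):
        with swap_stdout() as fake_stream:
            print('hello from the module under test')
        self.assertIn(b'hello', to_bytes(fake_stream.getvalue()))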
| [
"[email protected]"
] | |
5e33b27fa72491228a4057a288364b7805bb6d9e a140fe192fd643ce556fa34bf2f84ddbdb97f091 /.history/자료구조_20200628162237.py fbca173ec511a428e941dea3044ae6928b57b31c [] no_license sangha0719/py-practice 826f13cb422ef43992a69f822b9f04c2cb6d4815 6d71ce64bf91cc3bccee81378577d84ba9d9c121 refs/heads/master 2023-03-13T04:40:55.883279 2021-02-25T12:02:04 2021-02-25T12:02:04 342,230,484 0 0 null null null null UTF-8 Python false false 172 py | # Changing between data structures
# Coffee shop menu
menu = {"커피", "우유", "주스"}
print(menu, type(menu))
menu = list(menu)
print(menu, type(menu))
menu = tuple(menu)
print(menu, type(menu)) | [
"[email protected]"
] | |
ce4d41938b683c109eff56cd7cd1d6056ec7302c | 14aed8f5a144bd8b3833e7a9d5a1c8fddaeb1590 | /sparkSQL/venv/bin/pip3.6 | dbb1c08a0d63fc2102890a5377d12067f5fc537d | [] | no_license | TorpidCoder/BigData-Autoscaling | 2cc393fcb05b9b09a637302770bdf367f162425f | 7ee3538c7ccf877fd6868698891f29dcd9947df4 | refs/heads/master | 2021-06-22T11:02:27.667140 | 2020-12-04T04:14:10 | 2020-12-04T04:14:10 | 159,915,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 414 | 6 | #!/Users/sahilnagpal/PycharmProjects/sparkSQL/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
)
| [
"[email protected]"
] | |
3686c5bf4ae77bfa15f56b8cddff65549105c820 | ded0c895f6e1f8853f2222ae498bdc7ae52ef0e4 | /week-09/colori_quad.py | 727580ef3a1faf286bfeb82c09060522fe57c161 | [] | no_license | LorenzoLMP/TD2015 | 6a8846b4592b32db81338b8522a10a2dc52531c1 | e39b51d48149d07c3cea682a02eeec4e69ffbabd | refs/heads/master | 2021-01-17T14:47:20.378339 | 2016-05-31T11:33:50 | 2016-05-31T11:33:50 | 43,302,288 | 0 | 0 | null | 2015-09-28T13:58:46 | 2015-09-28T13:23:45 | null | UTF-8 | Python | false | false | 4,251 | py | from pylab import *
from scipy import *
from scipy import optimize
from scipy import misc
import math
data = genfromtxt('col_mon_precisi_3.txt')
coeff = genfromtxt('coeffic_matrix_315')
xdata = data[:,0]
ydata = data[:,1]
cr = coeff[:,0]
cg = coeff[:,1]
cb = coeff[:,2]
#zdata = sfasa
#########################################
#r = (ydata[1348] + ydata[1347])/2
r = 0.0075
#g = (ydata[922] + ydata[923] + ydata[924] + ydata[925])/4
g = 0.0087
#b = (ydata[1131] + ydata[1132] + ydata[1133] + ydata[1134])/4
b = 0.00573
print('signal red = ', r)
print('signal green = ', g)
print('signal blue = ', b)
################################################
l = 718
ydata_norm = []
ydata_norm2 = []
for i in range(630):
ydata_norm.append( r*(cr[math.modf(i/2)[1]])**2 + g*(cg[math.modf(i/2)[1]])**2 + b*(cb[math.modf(i/2)[1]])**2 )
ydata_norm2.append( (r*(cr[math.modf(i/2)[1]])**2 + g*(cg[math.modf(i/2)[1]])**2 + b*(cb[math.modf(i/2)[1]])**2)/( (cr[math.modf(i/2)[1]])**2 + (cg[math.modf(i/2)[1]])**2 + (cb[math.modf(i/2)[1]])**2) )
ydata_norm = array(ydata_norm)
ydata_norm2 = array(ydata_norm2)
xdata_norm = []
for i in range(630):
xdata_norm.append(l+i)
xdata_norm = array(xdata_norm)
xdata1 = []
for i in range(630):
xdata1.append(xdata[i+718])
#xdata1.append(xdata[i+718]*(1 - 30*i/(600*1348)) )
xdata1 = array(xdata1)
ydata1 = []
for i in range(630):
ydata1.append(ydata[i+718])
ydata1 = array(ydata1)
sig_norm = []
for i in range(630):
sig_norm.append(ydata1[i]/( (cr[math.modf(i/2)[1]])**2 + (cg[math.modf(i/2)[1]])**2 + (cb[math.modf(i/2)[1]])**2 ) )
sig_norm = array(sig_norm)
##############################################
def fitfunc(x, *par):
return par[0]*10**(-27) + par[1]*10**(-25)*x
p0 = [1, 6]  # initial guess for the fit parameters
##def fitfunc(x, *par):
## return par[0]*10**(-25)*x
##
##p0 = [5]  # initial guess for the fit parameter
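
# Fit sketch: fitfunc/p0 are defined above but the original script never runs
# the optimizer; the call below shows how the linear model could be fitted to
# the normalized signal (the model choice itself is an assumption).
popt, pcov = optimize.curve_fit(fitfunc, xdata_norm, ydata_norm2, p0=p0)
print('fit parameters =', popt)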
rc('font', size=15)
#xlabel(r'$frequenza [Hz]$')
#ylabel(r'$Gain $')
#minorticks_on()
# enable for a log-log scale
#xscale('log')
#yscale('log')
#xlim(80,30000)
#ylim(35,103)
############################################################
# data plotting section
#grid('on', which = "both")
#title("Bode Diagram Gain-Phase", size = 15)
plot(xdata, ydata, linestyle="None",marker=".", color="black", markersize= 9, label='experimental data')
#plot(xdata1, sig_norm, linestyle="None",marker=".", color="red", markersize= 9, label='normalized exp. data')
#plot(xdata_norm, ydata_norm, linestyle="None",marker=".", color="green", markersize= 9, label='quadratic model')
plot(xdata_norm, ydata_norm2, linestyle="None",marker=".", color="blue", markersize= 9, label='normalized model')
#plot(xdata1, ydata1, linestyle="None",marker=".", color="brown", markersize= 10)
title("Photovoltaic cell - response from monitor (quad)", size = 15)
annotate("red",
xy=(718, 0.0075), xycoords='data',
xytext=(750, 0.005), textcoords='data',
size=20, va="center", ha="center",
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.4", color="r"),
)
annotate("green",
xy=(926, 0.0086), xycoords='data',
xytext=(900, 0.0055), textcoords='data',
size=20, va="center", ha="center",
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.4", color="g"),
)
annotate("blue",
xy=(1130, 0.0055), xycoords='data',
xytext=(1100, 0.0040), textcoords='data',
size=20, va="center", ha="center",
arrowprops=dict(arrowstyle="simple",
connectionstyle="arc3,rad=-0.4", color="b"),
)
legend()
#errorbar(xdata, ydata, sigmay, sigmax, linestyle="None", color="black")
#xscale('log')
xlim(690,1370)
ylim(0.001,0.022)
xlabel(r'time $ [ms]$')
ylabel(r'Voltage $ [V]$')
grid('on', which = "both")
out_file = open('intens_screen_week9.txt', 'w')
out_file.write("#t(s)\tintens\n")
i= 0
while i< len(xdata_norm):
out_file.write("%s\t%s\n"%((xdata_norm[i]-718)/2, ydata_norm2[i]))
i = i+2
out_file.close()
savefig('dati_normal.png', dpi=400)
show()
| [
"[email protected]"
] | |
6bcffb1fb992aa4432f982b6faf84cacf761eb87 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2824/60891/284880.py | 8c2661ffda0fed50b0e0e907e437fd1d941f63fa | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | n_t_c = [int(i) for i in input().split()]
n = n_t_c[0]
t = n_t_c[1]
c = n_t_c[2]
p = [int(i) for i in input().split()]
list_index = []
for i in range(n):
if p[i] > t:
list_index.append(i)
list_num = []
if len(list_index) == 0:
ans = n - (c - 1)
else:
list_num = [list_index[0] - 0]
for i in range(len(list_index) - 1):
list_num.append(list_index[i + 1] - list_index[i] - 1)
ans = 0
for i in list_num:
if i>=c:
ans += i - (c - 1)
print(ans)
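
# Cross-check sketch (brute force, assuming the same window semantics):
# brute = sum(1 for i in range(n - c + 1) if all(x <= t for x in p[i:i + c]))
# assert brute == ans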
| [
"[email protected]"
] | |
882da9b06c3c8215bad47901f52fb0b1a2f538be | 9e201dfe87446274995add9a1436d392ced616c9 | /draco2/draco/__init__.py | 0d19522fdb4d56bebbbeb78c05eaa465d8784dc1 | [
"MIT"
] | permissive | geertj/draco2 | 9da00f68016a16a82be9c7556e08ca06611bba9b | 3a533d3158860102866eaf603840691618f39f81 | refs/heads/master | 2021-01-01T06:45:37.111786 | 2007-04-30T13:37:00 | 2007-04-30T13:37:00 | 2,787,375 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 415 | py | # vi: ts=8 sts=4 sw=4 et
#
# __init__.py: draco2.draco package definition
#
# This file is part of Draco2. Draco2 is free software and is made available
# under the MIT license. Consult the file "LICENSE" that is distributed
# together with this file for the exact licensing terms.
#
# Draco2 is copyright (c) 1999-2007 by the Draco2 authors. See the file
# "AUTHORS" for a complete overview.
#
# $Revision: 1187 $
| [
"[email protected]"
] | |
9156bc3c57f454eadadfdd4973c2129250dd9c13 | 8246e9fbdecdb37651e0d09497fd9428e434f33c | /SubscriptionPlan/tests.py | 5f2a8d977297e1657b4b0c439101b8974c6bc3cc | [] | no_license | rajeev1234/Landing-Page | 479995026ab01fc504a1e9502e7763dc04266009 | 4bfd22a6a1776907ba78b3dc9037064c820b049e | refs/heads/master | 2020-03-08T13:37:20.253252 | 2018-04-05T06:33:26 | 2018-04-05T06:33:26 | 128,162,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,609 | py | from django.test import TestCase,SimpleTestCase
from django.urls import reverse,reverse_lazy
from .models import SubscriptionPlan
from django.conf import settings
from django.contrib.auth import get_user_model
from django.utils import timezone
# Test Class for SubscriptionPlan Application
class SubscriptionPlanTest(TestCase):
########################## Model Testing #### ########################
# SubscriptionPlan object with dummy data
def setUp(self):
# dummy user for login
self.user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password = 'test'
)
self.SubscriptionPlan = SubscriptionPlan.objects.create(
# Fields according to defined in Model
SubscriptionPlan_Amount = 199,
SubscriptionPlan_End_Date = timezone.now(),
SubscriptionPlan_FOR_FILM_COIN = 1000,
SubscriptionPlan_Openings_Allowed = True,
SubscriptionPlan_Location_Allowed = True,
SubscriptionPlan_Pitch_Allowed = True,
SubscriptionPlan_Pitch_Box_Capacity_Image_per_pitch = 111,
SubscriptionPlan_Start_Date = timezone.now(),
SubscriptionPlan_Type = 'SubscriptionPlan_Type',
SubscriptionPlan_User_ID = self.user,
)
#-----------------------------------------------------------------------------------------#
# Check redirection URL
def test_get_absolute_url(self):
# Redirection goes to SubscriptionPlan details
self.assertEquals(self.SubscriptionPlan.get_absolute_url(), '/subscriptionplan/1')
#-----------------------------------------------------------------------------------------#
    # Check content of the SubscriptionPlan object created by the create() query set
def test_SubscriptionPlan_content(self):
# Verify for each field
self.assertEqual(int(f'{self.SubscriptionPlan.SubscriptionPlan_Amount}'), 199)
self.assertEqual(int(f'{self.SubscriptionPlan.SubscriptionPlan_FOR_FILM_COIN}'), 1000)
self.assertEqual(bool(f'{self.SubscriptionPlan.SubscriptionPlan_Openings_Allowed}'), True)
self.assertEqual(bool(f'{self.SubscriptionPlan.SubscriptionPlan_Location_Allowed}'), True)
self.assertEqual(bool(f'{self.SubscriptionPlan.SubscriptionPlan_Pitch_Allowed}'), True)
self.assertEqual(int(f'{self.SubscriptionPlan.SubscriptionPlan_Pitch_Box_Capacity_Image_per_pitch}'), 111)
self.assertEqual(f'{self.SubscriptionPlan.SubscriptionPlan_Type}', 'SubscriptionPlan_Type')
self.assertEqual(f'{self.SubscriptionPlan.SubscriptionPlan_User_ID}', self.user.username)
#--------------------------------------------------------------------------------------------#
# ############################# Model Test End ###########################################
# ############################### Views Test ########################################
# Test SubscriptionPlan List View
def test_SubscriptionPlanList_view(self):
# Login the user defined in SetUp
self.client.login(username='testuser', password='test')
        # Get response from the defined URL namespace
response = self.client.get(reverse('SubscriptionPlan_list'))
self.assertEqual(response.status_code, 200)
# Check Content of List View
self.assertContains(response,199)
self.assertContains(response,'SubscriptionPlan_Type')
# Check for Correct template used in template/SubscriptionPlans
self.assertTemplateUsed(response, 'SubscriptionPlans/SubscriptionPlan_list.html')
#--------------------------------------------------------------------------------------------#
# Test SubscriptionPlan Detail View
def test_SubscriptionPlanDetail_view(self):
# Login the user defined in SetUp
self.client.login(username='testuser', password='test')
# Find primary key of table
SubscriptionPlan_pk = SubscriptionPlan.objects.get(SubscriptionPlan_Amount=199).pk
# Get response
response = self.client.get(reverse_lazy('SubscriptionPlan_details',kwargs={'pk':SubscriptionPlan_pk}))
# Check for any invalid value
no_response = self.client.get(reverse_lazy('SubscriptionPlan_details',kwargs={'pk':10000}))
        # 200 for valid and 404 for invalid
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
# check for content of Detail Page
self.assertContains(response,self.user.username)
# Check for Correct template used in template/SubscriptionPlans
self.assertTemplateUsed(response, 'SubscriptionPlans/SubscriptionPlan_details.html')
#-------------------------------------------------------------------------------------------#
# Test SubscriptionPlan Create View
def test_SubscriptionPlanCreate_view(self):
        # Login the user defined in setUp
self.client.login(username='testuser', password='test')
        # Generate a response by creating an object via HTTP POST
response = self.client.post('/subscriptionplan/new/', {
'SubscriptionPlan_Amount' : 199,
'SubscriptionPlan_End_Date' : timezone.now(),
'SubscriptionPlan_FOR_FILM_COIN' : 1000,
'SubscriptionPlan_Openings_Allowed' : True,
'SubscriptionPlan_Location_Allowed' : True,
'SubscriptionPlan_Pitch_Allowed' : True,
'SubscriptionPlan_Pitch_Box_Capacity_Image_per_pitch' : 111,
'SubscriptionPlan_Start_Date' : timezone.now(),
'SubscriptionPlan_Type' : 'SubscriptionPlan_Type',
'SubscriptionPlan_User_ID' : self.user,
})
# print(response.content)
# Check for successful response
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'SubscriptionPlan_Type')
self.assertContains(response, self.user.username)
self.assertContains(response, 'checked')
self.assertContains(response, 111)
self.assertContains(response, 1000)
self.assertContains(response, 199)
# Check for correct template used in template/SubscriptionPlans
self.assertTemplateUsed(response, 'SubscriptionPlans/SubscriptionPlan_new.html')
#---------------------------------------------------------------------------------------#
# Test SubscriptionPlan Update view
def test_SubscriptionPlanupdate_view(self):
# Login the user
self.client.login(username='testuser', password='test')
# Find primary key of table
SubscriptionPlan_pk = SubscriptionPlan.objects.get(SubscriptionPlan_Type='SubscriptionPlan_Type').pk
# Get response using pk on details view
response = self.client.get(reverse_lazy('SubscriptionPlan_details',kwargs={'pk':SubscriptionPlan_pk}), {
'SubscriptionPlan_Amount' : 199,
'SubscriptionPlan_End_Date' : timezone.now(),
'SubscriptionPlan_FOR_FILM_COIN' : 1000,
'SubscriptionPlan_Openings_Allowed' : True,
'SubscriptionPlan_Location_Allowed' : True,
'SubscriptionPlan_Pitch_Allowed' : True,
'SubscriptionPlan_Pitch_Box_Capacity_Image_per_pitch' : 111,
'SubscriptionPlan_Start_Date' : timezone.now(),
'SubscriptionPlan_Type' : 'SubscriptionPlan_Type',
'SubscriptionPlan_User_ID' : self.user,
})
# Check for successful response
self.assertEqual(response.status_code, 200)
# Check for correct templates
self.assertTemplateUsed(response,'SubscriptionPlans/SubscriptionPlan_details.html')
#--------------------------------------------------------------------------------------------#
# Test Delete View of SubscriptionPlan views
def test_SubscriptionPlandelete_view(self):
# Login the user
self.client.login(username='testuser', password='test')
#Find primary key of table
SubscriptionPlan_pk = SubscriptionPlan.objects.get(SubscriptionPlan_Type='SubscriptionPlan_Type').pk
# Get response to delete
response = self.client.get(reverse_lazy('SubscriptionPlan_delete',kwargs={'pk':SubscriptionPlan_pk}))
# self.assertContains(response, 'Are you sure you want to delete') # THIS PART WORKS
# Check deleted value , returns false i.e.302
post_response = self.client.post(reverse_lazy('SubscriptionPlan_delete',kwargs={'pk':SubscriptionPlan_pk}))
# self.assertRedirects(post_response, reverse_lazy('SubscriptionPlan_delete',kwargs={'pk':SubscriptionPlan_pk}), status_code=302)
self.assertEqual(response.status_code, 200)
# check for Correct Template Used
self.assertTemplateUsed(response, 'SubscriptionPlans/SubscriptionPlan_delete.html')
# ################################ View Testing End #################################################
# ################################ Testing the URLs ##############################################
class PagesTests(SimpleTestCase):
# Check URL for list/ Home
def test_home_page_status_code(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
# #-----------------------------------------------------------------------------------------------------#
# # URL for new
# def test_new_page_status_code(self):
# # Login the user defined in SetUp
# # self.client.login(username='testuser', password='test')
# # Get response
# response = self.client.get('/SubscriptionPlans/1/')
# self.assertEqual(response.status_code, 200)
#------------------------------------------------------------------------------------------------------#
# # def test_update_page_status_code(self):
# # # url = reverse('SubscriptionPlanListView')
# # response = self.client.get('/SubscriptionPlans/1/')
# # self.assertEqual(response.status_code, 200)
#-------------------------------------------------------------------------------------------------------#
# # def test_detail_page_status_code(self):
# # response = self.client.get('/{1}/')
# # self.assertEqual(response.status_code, 200)
#-------------------------------------------------------------------------------------------------------#
# # def test_delete_page_status_code(self):
# # response = self.client.get('/{1}/delete/')
# # self.assertEqual(response.status_code, 200) | [
"[email protected]"
] | |
0a724a2e65e58755be2cbd93ecc23710dc2da8e5 | 0d4139330dda389664df2e79b397f8153e6c1189 | /backend/site_management_25562/urls.py | 726931ba898e10c60a17c2b354ef452b4cc09a43 | [] | no_license | crowdbotics-apps/site-management-25562 | d7a4d08ed286838814d892096510ca642e59a574 | 704d21f8eeec8f4bc20d4478e34e5196e76c03d0 | refs/heads/master | 2023-04-08T11:40:14.414118 | 2021-04-08T18:13:21 | 2021-04-08T18:13:21 | 356,007,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | """site_management_25562 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Site_Management"
admin.site.site_title = "Site_Management Admin Portal"
admin.site.index_title = "Site_Management Admin"
# swagger
api_info = openapi.Info(
title="Site_Management API",
default_version="v1",
description="API documentation for Site_Management App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] | |
edcb661a53d5e24b760420759edcc68d6943de55 | 0523136530c8caf2a7aacbc52aae43dc8998ca18 | /ship.py | 5b3fb775270d8b76d0692d8664f7cd786c3d7505 | [] | no_license | irfan87/alien_invasion_pygame | 1d90eb1914e16a84b8318af92dd102f3a4201313 | 71587cbb13d5ea157e325e19c439ceb94c029d5d | refs/heads/master | 2020-07-06T17:13:44.520471 | 2019-08-20T03:12:58 | 2019-08-20T03:12:58 | 203,088,226 | 0 | 0 | null | 2019-08-20T03:12:59 | 2019-08-19T03:00:27 | Python | UTF-8 | Python | false | false | 1,519 | py | import pygame
from pygame.sprite import Sprite
class Ship(Sprite):
# a class to manage the ship
def __init__(self, ai_game):
# initialize the ship and set its statring position
super().__init__()
self.screen = ai_game.screen
self.screen_rect = ai_game.screen.get_rect()
self.settings = ai_game.settings
# load the space ship image
self.image = pygame.image.load('images/ship.bmp')
self.rect = self.image.get_rect()
# start each new ship at the bottom center of the screen
self.rect.midbottom = self.screen_rect.midbottom
# store a decimal value for the ship's horizontal position
self.x = float(self.rect.x)
# movement flag
self.moving_right = False
self.moving_left = False
def update(self):
# update the ship's position based on the movement flag
        # update the ship's x value, not the rect
if self.moving_right and self.rect.right < self.screen_rect.right:
self.x += self.settings.ship_speed
if self.moving_left and self.rect.left > 0:
self.x -= self.settings.ship_speed
# update rect object form self.x
self.rect.x = self.x
def center_ship(self):
# center the ship on the screen
self.rect.midbottom = self.screen_rect.midbottom
self.x = float(self.rect.x)
def blitme(self):
# draw the ship at its current location
self.screen.blit(self.image, self.rect) | [
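

# Minimal harness sketch (assumptions: an images/ship.bmp asset exists on disk
# and a settings object only needs to expose ship_speed; neither is defined in
# this file).
if __name__ == '__main__':
    class _Settings:
        ship_speed = 1.5

    class _Game:
        def __init__(self):
            pygame.init()
            self.screen = pygame.display.set_mode((1200, 800))
            self.settings = _Settings()

    ship = Ship(_Game())
    ship.blitme()
    pygame.display.flip()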
"[email protected]"
] |