| column | dtype | observed range |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M (nullable) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 (nullable) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 (nullable) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 to 1 |
| author_id | string | length 1 to 132 |
990659f28fe89f9d10375d9edc85a48e910e91f4
|
52b5fa23f79d76883728d8de0bfd202c741e9c43
|
/kubernetes/client/models/v1beta2_scale_status.py
|
af1d075c65c7c657052a68400f1a3e347c20cab5
|
[] |
no_license
|
kippandrew/client-python-tornado
|
5d00810f57035825a84e37ff8fc89a7e79aed8da
|
d479dfeb348c5dd2e929327d800fe033b5b3b010
|
refs/heads/master
| 2021-09-04T13:01:28.275677 | 2018-01-18T23:27:34 | 2018-01-18T23:27:34 | 114,912,995 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,119 |
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.8.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class V1beta2ScaleStatus(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'replicas': 'int',
'selector': 'dict(str, str)',
'target_selector': 'str'
}
attribute_map = {
'replicas': 'replicas',
'selector': 'selector',
'target_selector': 'targetSelector'
}
def __init__(self, replicas=None, selector=None, target_selector=None): # noqa: E501
"""V1beta2ScaleStatus - a model defined in Swagger""" # noqa: E501
self._replicas = None
self._selector = None
self._target_selector = None
self.discriminator = None
self.replicas = replicas
if selector is not None:
self.selector = selector
if target_selector is not None:
self.target_selector = target_selector
@property
def replicas(self):
"""Gets the replicas of this V1beta2ScaleStatus. # noqa: E501
actual number of observed instances of the scaled object. # noqa: E501
:return: The replicas of this V1beta2ScaleStatus. # noqa: E501
:rtype: int
"""
return self._replicas
@replicas.setter
def replicas(self, replicas):
"""Sets the replicas of this V1beta2ScaleStatus.
actual number of observed instances of the scaled object. # noqa: E501
:param replicas: The replicas of this V1beta2ScaleStatus. # noqa: E501
:type: int
"""
if replicas is None:
raise ValueError("Invalid value for `replicas`, must not be `None`") # noqa: E501
self._replicas = replicas
@property
def selector(self):
"""Gets the selector of this V1beta2ScaleStatus. # noqa: E501
label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors # noqa: E501
:return: The selector of this V1beta2ScaleStatus. # noqa: E501
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""Sets the selector of this V1beta2ScaleStatus.
label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors # noqa: E501
:param selector: The selector of this V1beta2ScaleStatus. # noqa: E501
:type: dict(str, str)
"""
self._selector = selector
@property
def target_selector(self):
"""Gets the target_selector of this V1beta2ScaleStatus. # noqa: E501
        label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors  # noqa: E501
:return: The target_selector of this V1beta2ScaleStatus. # noqa: E501
:rtype: str
"""
return self._target_selector
@target_selector.setter
def target_selector(self, target_selector):
"""Sets the target_selector of this V1beta2ScaleStatus.
        label selector for pods that should match the replicas count. This is a serialized version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors  # noqa: E501
:param target_selector: The target_selector of this V1beta2ScaleStatus. # noqa: E501
:type: str
"""
self._target_selector = target_selector
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta2ScaleStatus):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
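# --- Editor's illustration (not part of the generated client): a minimal usage
# sketch of the model defined above, with made-up field values. Note that
# to_dict() keys follow the Python attribute names in swagger_types, while
# attribute_map only records the JSON names used on the wire.
if __name__ == '__main__':
    status = V1beta2ScaleStatus(replicas=3, selector={'app': 'web'},
                                target_selector='app=web')
    print(status.to_dict())
    # -> {'replicas': 3, 'selector': {'app': 'web'}, 'target_selector': 'app=web'}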
|
[
"[email protected]"
] | |
daed34a30c9a24d627c92ee6152105a324fac8aa
|
6d3bb1ab45d306b90e4727a7f1a0a31b59e7dc5a
|
/tests/test_kalman.py
|
3fd0488c1be9dc297e3914231b89d45bc73380e4
|
[
"MIT"
] |
permissive
|
NepalRobotics/BeliefSystem
|
6f0023783cb59d251897d20b49900a81294973d7
|
435e902062884b10f61582443f1fd4b118dd9d3f
|
refs/heads/devel
| 2016-09-05T10:33:30.446296 | 2016-01-31T22:47:50 | 2016-01-31T22:47:50 | 41,973,456 | 0 | 1 | null | 2016-01-31T22:47:50 | 2015-09-05T19:46:07 |
Python
|
UTF-8
|
Python
| false | false | 8,317 |
py
|
""" Tests for the Kalman filter. """
import numpy as np
import kalman
import tests
class _BaseTest(tests.BaseTest):
""" Defines common members for all the test cases in this file. """
  # Constants for the indexes of different things in the state.
_POS_X = 0
_POS_Y = 1
_VEL_X = 2
_VEL_Y = 3
# Index of the first LOB.
_LOB = 4
# Constants for the indices of the x and y components in a coordinate tuple.
_X = 0
_Y = 1
class KalmanTests(_BaseTest):
""" Tests for the Kalman class. """
def test_basic(self):
""" Tests that the filter gives reasonable values under extremely basic
circumstances. """
basic_filter = kalman.Kalman((1, 0), (1, 0))
basic_filter.add_transmitter(basic_filter.normalize_lobs(0), (5, 0))
basic_filter.set_observations((2, 0), (1, 0),
basic_filter.normalize_lobs(0))
basic_filter.update()
state = basic_filter.state()
    # We should see values that are quite close to our observation since,
# (surprise, surprise) our observation lines up EXACTLY with our model.
self._assert_near(2, state[self._POS_X], 0.01)
self._assert_near(0, state[self._POS_Y], 0.01)
self._assert_near(1, state[self._VEL_X], 0.01)
self._assert_near(0, state[self._VEL_Y], 0.01)
self._assert_near(basic_filter.normalize_lobs(0), state[self._LOB], 0.01)
covariances = basic_filter.state_covariances()
# Our covariances should have gotten close to zero, since our observations
# match our model so well.
for x in np.nditer(covariances):
self._assert_near(0, x, 0.05)
# Run another perfect iteration.
basic_filter.set_observations((3, 0), (1, 0),
basic_filter.normalize_lobs(0))
basic_filter.update()
state = basic_filter.state()
self._assert_near(3, state[self._POS_X], 0.01)
self._assert_near(0, state[self._POS_Y], 0.01)
self._assert_near(1, state[self._VEL_X], 0.01)
self._assert_near(0, state[self._VEL_Y], 0.01)
self._assert_near(basic_filter.normalize_lobs(0), state[self._LOB], 0.01)
covariances = basic_filter.state_covariances()
# Our covariances should be close again.
for x in np.nditer(covariances):
self._assert_near(0, x, 0.05)
def test_going_forward(self):
""" Tests that the model prediction still works if we rotate the whole thing
90 degrees. """
basic_filter = kalman.Kalman((0, 1), (0, 1))
basic_filter.add_transmitter(basic_filter.normalize_lobs(0), (0, 5))
basic_filter.set_observations((0, 2), (0, 1),
basic_filter.normalize_lobs(0))
basic_filter.update()
state = basic_filter.state()
    # We should see values that are quite close to our observation since,
# (surprise, surprise) our observation lines up EXACTLY with our model.
self._assert_near(0, state[self._POS_X], 0.01)
self._assert_near(2, state[self._POS_Y], 0.01)
self._assert_near(0, state[self._VEL_X], 0.01)
self._assert_near(1, state[self._VEL_Y], 0.01)
self._assert_near(basic_filter.normalize_lobs(0), state[self._LOB], 0.01)
covariances = basic_filter.state_covariances()
# Our covariances should have gotten close to zero, since our observations
# match our model so well.
for x in np.nditer(covariances):
self._assert_near(0, x, 0.05)
# Run another perfect iteration.
basic_filter.set_observations((0, 3), (0, 1),
basic_filter.normalize_lobs(0))
basic_filter.update()
state = basic_filter.state()
self._assert_near(0, state[self._POS_X], 0.01)
self._assert_near(3, state[self._POS_Y], 0.01)
self._assert_near(0, state[self._VEL_X], 0.01)
self._assert_near(1, state[self._VEL_Y], 0.01)
self._assert_near(basic_filter.normalize_lobs(0), state[self._LOB], 0.01)
covariances = basic_filter.state_covariances()
# Our covariances should be close again.
for x in np.nditer(covariances):
self._assert_near(0, x, 0.05)
def test_going_diagonal(self):
""" Tests that the model still works if we go at 45 degrees. """
basic_filter = kalman.Kalman((0, 0), (1, 1))
basic_filter.add_transmitter(basic_filter.normalize_lobs(0), (5, 5))
basic_filter.set_observations((1, 1), (1, 1),
basic_filter.normalize_lobs(0))
basic_filter.update()
state = basic_filter.state()
    # We should see values that are quite close to our observation since,
# (surprise, surprise) our observation lines up EXACTLY with our model.
self._assert_near(1, state[self._POS_X], 0.01)
self._assert_near(1, state[self._POS_Y], 0.01)
self._assert_near(1, state[self._VEL_X], 0.01)
self._assert_near(1, state[self._VEL_Y], 0.01)
self._assert_near(basic_filter.normalize_lobs(0), state[self._LOB], 0.01)
covariances = basic_filter.state_covariances()
# Our covariances should have gotten close to zero, since our observations
# match our model so well.
for x in np.nditer(covariances):
self._assert_near(0, x, 0.05)
# Run another perfect iteration.
basic_filter.set_observations((2, 2), (1, 1),
basic_filter.normalize_lobs(0))
basic_filter.update()
state = basic_filter.state()
self._assert_near(2, state[self._POS_X], 0.01)
self._assert_near(2, state[self._POS_Y], 0.01)
self._assert_near(1, state[self._VEL_X], 0.01)
self._assert_near(1, state[self._VEL_Y], 0.01)
self._assert_near(basic_filter.normalize_lobs(0), state[self._LOB], 0.01)
covariances = basic_filter.state_covariances()
# Our covariances should be close again.
for x in np.nditer(covariances):
self._assert_near(0, x, 0.05)
def test_transmitter_adding(self):
""" Tests that adding new transmitters in the middle works. """
basic_filter = kalman.Kalman((1, 0), (1, 0))
basic_filter.add_transmitter(basic_filter.normalize_lobs(0), (5, 0))
basic_filter.set_observations((2, 0), (1, 0),
basic_filter.normalize_lobs(0))
basic_filter.update()
# Now, add another one.
basic_filter.add_transmitter(basic_filter.normalize_lobs(0), (10, 0))
bearing = basic_filter.normalize_lobs(0)
basic_filter.set_observations((3, 0), (1, 0), bearing, bearing)
basic_filter.update()
state = basic_filter.state()
self._assert_near(3, state[self._POS_X], 0.01)
self._assert_near(0, state[self._POS_Y], 0.01)
self._assert_near(1, state[self._VEL_X], 0.01)
self._assert_near(0, state[self._VEL_Y], 0.01)
self._assert_near(basic_filter.normalize_lobs(0), state[self._LOB], 0.01)
def test_position_error_ellipse(self):
""" Tests that we can draw a reasonable position error ellipse. """
basic_filter = kalman.Kalman((1, 0), (1, 0))
width, height, angle = basic_filter.position_error_ellipse(1.96)
# Because our initial covariance matrix will make the variances for x and y
    # the same, we expect the ellipse to be a circle.
self.assertEqual(width, height)
# Our angle should be pi/2, since all our covariances are zero.
self.assertEqual(np.pi / 2.0, angle)
# Now give it an observation in which y is off a lot more than x.
basic_filter.set_observations((2, 1), (1, 0))
basic_filter.update()
width, height, angle = basic_filter.position_error_ellipse(1.96)
# Now, the width should be larger than the height. (It's rotated.)
self.assertGreater(width, height)
def test_lob_confidence(self):
""" Tests that we can compute a confidence interval for the LOB data. """
basic_filter = kalman.Kalman((1, 0), (1, 0))
self.assertEqual([], basic_filter.lob_confidence_intervals(1.96))
# Now give it a transmitter to track.
basic_filter.add_transmitter(basic_filter.normalize_lobs(0), (5, 0))
# We should have a non-zero margin of error.
error = basic_filter.lob_confidence_intervals(1.96)
self.assertGreater(error, 0)
def test_flip_transmitter(self):
""" Tests that the flip_transmitter method works. """
my_filter = kalman.Kalman((0, 0), (1, 1))
# Do something really easy first.
lob = np.pi / 4.0
my_filter.add_transmitter(lob, (2, 2))
my_filter.flip_transmitter(4)
self.assertEqual(5.0 * np.pi / 4.0, my_filter.lobs()[0])
# Now we should be able to flip it back.
my_filter.flip_transmitter(4)
self.assertEqual(np.pi / 4.0, my_filter.lobs()[0])
|
[
"[email protected]"
] | |
ff25fb6064326423077dde69fc7e8673e9a34bff
|
1c2428489013d96ee21bcf434868358312f9d2af
|
/ultracart/models/conversation_twilio_account.py
|
a649021e1f36c58540847bbbd1732a8064137324
|
[
"Apache-2.0"
] |
permissive
|
UltraCart/rest_api_v2_sdk_python
|
7821a0f6e0e19317ee03c4926bec05972900c534
|
8529c0bceffa2070e04d467fcb2b0096a92e8be4
|
refs/heads/master
| 2023-09-01T00:09:31.332925 | 2023-08-31T12:52:10 | 2023-08-31T12:52:10 | 67,047,356 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,199 |
py
|
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2 # noqa: E501
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ConversationTwilioAccount(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'merchant_id': 'str',
'twilio_phone_numbers': 'list[str]'
}
attribute_map = {
'merchant_id': 'merchant_id',
'twilio_phone_numbers': 'twilio_phone_numbers'
}
def __init__(self, merchant_id=None, twilio_phone_numbers=None): # noqa: E501
"""ConversationTwilioAccount - a model defined in Swagger""" # noqa: E501
self._merchant_id = None
self._twilio_phone_numbers = None
self.discriminator = None
if merchant_id is not None:
self.merchant_id = merchant_id
if twilio_phone_numbers is not None:
self.twilio_phone_numbers = twilio_phone_numbers
@property
def merchant_id(self):
"""Gets the merchant_id of this ConversationTwilioAccount. # noqa: E501
:return: The merchant_id of this ConversationTwilioAccount. # noqa: E501
:rtype: str
"""
return self._merchant_id
@merchant_id.setter
def merchant_id(self, merchant_id):
"""Sets the merchant_id of this ConversationTwilioAccount.
:param merchant_id: The merchant_id of this ConversationTwilioAccount. # noqa: E501
:type: str
"""
self._merchant_id = merchant_id
@property
def twilio_phone_numbers(self):
"""Gets the twilio_phone_numbers of this ConversationTwilioAccount. # noqa: E501
:return: The twilio_phone_numbers of this ConversationTwilioAccount. # noqa: E501
:rtype: list[str]
"""
return self._twilio_phone_numbers
@twilio_phone_numbers.setter
def twilio_phone_numbers(self, twilio_phone_numbers):
"""Sets the twilio_phone_numbers of this ConversationTwilioAccount.
:param twilio_phone_numbers: The twilio_phone_numbers of this ConversationTwilioAccount. # noqa: E501
:type: list[str]
"""
self._twilio_phone_numbers = twilio_phone_numbers
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(ConversationTwilioAccount, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ConversationTwilioAccount):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
10639ce27fe471d8d20685065f3271f45c21380c
|
4eac217f1a9c175ee370d83446c6ae763c69a26f
|
/Level 1/Task3.py
|
2cbc0809746a1495e4be9f85b91b33bc3ff7de91
|
[] |
no_license
|
abhinavsharma629/Data-Structures-And-Algorithms-Udacity-Nanodegree
|
411a74f042a671c6e7d3123bc63716d2d3748cc6
|
6e7645a9afb6065d12524a94734aeda022438f10
|
refs/heads/master
| 2022-11-06T00:49:11.863218 | 2020-06-20T11:52:42 | 2020-06-20T11:52:42 | 273,698,082 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,074 |
py
|
"""
Read file into texts and calls.
It's ok if you don't understand how to read files.
"""
import csv
with open('texts.csv', 'r') as f:
reader = csv.reader(f)
texts = list(reader)
def checkCaller(phone_no):
    # fixed-line numbers start with a bracketed area code; Bangalore is "(080)"
    if(phone_no[0]=="("):
        return True if phone_no.split("(")[1].split(")")[0]=="080" else False
    return False
def getReceiver(phone_no):
    # if telephone no (bracketed area code)
    if(phone_no[0]=="("):
        return phone_no.split("(")[1].split(")")[0]
    # if mobile no (contains a space; split always yields at least one part, so test for > 1)
    elif(len(phone_no.split(" "))>1):
        return phone_no.split(" ")[0][0:4]
    # if telemarketers no
    else:
        return phone_no.split("140")[1]
list_of_codes=set([])
calls_total=0
call_and_receive_total=0
with open('calls.csv', 'r') as f:
reader = csv.reader(f)
calls = list(reader)
for call in calls:
        # check if the caller is from Bangalore
isCallerFromBanglore=checkCaller(call[0])
getReceiverNo=getReceiver(call[1])
        # if the caller is from Bangalore
if(isCallerFromBanglore):
list_of_codes.add(getReceiverNo)
            # check if the receiver is from Bangalore
isReceiverFromBanglore=checkCaller(call[1])
if(isReceiverFromBanglore):
                # increment the Bangalore -> Bangalore call count
call_and_receive_total+=1
            # increment the total count of calls from Bangalore
calls_total+=1
print("The numbers called by people in Bangalore have codes:")
list_of_codes=sorted(list_of_codes)
for list_code in list_of_codes:
print(list_code)
percent=round((float)((call_and_receive_total/calls_total))*100,2)
print("{} percent of calls from fixed lines in Bangalore are calls to other fixed lines in Bangalore.".format(percent))
"""
TASK 3:
(080) is the area code for fixed line telephones in Bangalore.
Fixed line numbers include parentheses, so Bangalore numbers
have the form (080)xxxxxxx.)
Part A: Find all of the area codes and mobile prefixes called by people
in Bangalore.
- Fixed lines start with an area code enclosed in brackets. The area
codes vary in length but always begin with 0.
- Mobile numbers have no parentheses, but have a space in the middle
of the number to help readability. The prefix of a mobile number
is its first four digits, and they always start with 7, 8 or 9.
- Telemarketers' numbers have no parentheses or space, but they start
with the area code 140.
Print the answer as part of a message:
"The numbers called by people in Bangalore have codes:"
<list of codes>
The list of codes should be print out one per line in lexicographic order with no duplicates.
Part B: What percentage of calls from fixed lines in Bangalore are made
to fixed lines also in Bangalore? In other words, of all the calls made
from a number starting with "(080)", what percentage of these calls
were made to a number also starting with "(080)"?
Print the answer as a part of a message::
"<percentage> percent of calls from fixed lines in Bangalore are calls
to other fixed lines in Bangalore."
The percentage should have 2 decimal digits
"""
|
[
"[email protected]"
] | |
f352d8ce22dd736ee5f0204bbde9717188e6d87c
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_groped.py
|
9e6f4052a029097be877a2408813e55edef8a879
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 231 |
py
|
from xai.brain.wordbase.nouns._grope import _GROPE
# class header
class _GROPED(_GROPE, ):
def __init__(self,):
_GROPE.__init__(self)
self.name = "GROPED"
self.specie = 'nouns'
self.basic = "grope"
self.jsondata = {}
|
[
"[email protected]"
] | |
c326f428fae13c4af9cb46ab323d1a372aa587a4
|
b8a9b1204627c7d6b4123f4dba54631251e27f49
|
/accounts/migrations/0006_auto_20210313_1624.py
|
b672084659b2994715f8cc3ffbc278a53f407904
|
[] |
no_license
|
ianmanalo1026/Coffee-Shop
|
53aee5b4ff26294ead1808006c7d9ec258aca8d9
|
f61a94ee416aed436d236c7243625417c7214479
|
refs/heads/master
| 2023-03-16T12:24:02.354223 | 2021-03-16T14:10:23 | 2021-03-16T14:10:23 | 347,078,397 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 474 |
py
|
# Generated by Django 3.1.4 on 2021-03-13 08:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20210313_1606'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='first_name',
new_name='name',
),
migrations.RemoveField(
model_name='profile',
name='last_name',
),
]
|
[
"[email protected]"
] | |
80410beb2f4850b79e647b255bfd7626e96e2884
|
d3fa61d28cdc0c515ebd4f610122a9141cf69471
|
/gui/demoDlg-21.py
|
e155e49b73365362dea088ee8e5824694470688d
|
[] |
no_license
|
iorilan/python-samples
|
0bd2d66461bc5580de607c5e9984f713bc506c56
|
1db836d90731763e30a109c28948734727194232
|
refs/heads/master
| 2022-04-05T03:06:18.830332 | 2020-02-23T16:49:06 | 2020-02-23T16:49:06 | 213,014,416 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 810 |
py
|
from tkinter import * # get base widget set
from dialogTable import demos # button callback handlers
from quitter import Quitter # attach a quit object to me
class Demo(Frame):
def __init__(self, parent=None):
Frame.__init__(self, parent)
self.pack()
Label(self, text="Basic demos").pack()
for key in demos:
func = (lambda key=key: self.printit(key))
#func = (lambda self=self, name=key: self.printit(name))
#func = (lambda handler=self.printit, name=key: handler(name))
Button(self, text=key, command=func).pack(side=TOP, fill=BOTH)
Quitter(self).pack(side=TOP, fill=BOTH)
def printit(self, name):
print(name, 'returns =>', demos[name]()) # fetch, call, print
if __name__ == '__main__': Demo().mainloop()
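# --- Editor's note (illustrative sketch, not part of the original demo): why the
# loop above uses `lambda key=key: ...`. A closure looks `key` up when the button
# fires, so without the default argument every button would see the loop's final
# key. Defining (not calling) this helper keeps the demo's behavior unchanged.
def _late_vs_bound_binding_demo():
    late = [lambda: k for k in ('a', 'b')]       # both lambdas see the final k
    bound = [lambda k=k: k for k in ('a', 'b')]  # each lambda captures its own k
    assert [f() for f in late] == ['b', 'b']
    assert [f() for f in bound] == ['a', 'b']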
|
[
"[email protected]"
] | |
f432c581c95ce2dab16294b0388c1934ff86ade5
|
9abd182d02355ddf0b79afd4a35f7127a4a66f7a
|
/tests/auto/test_auto_tasks.py
|
31cf99827d6d2993bc4ec767aa5e9f469a8fa5ce
|
[
"Apache-2.0"
] |
permissive
|
dmlc/gluon-cv
|
e1303086419a5733661d0fcb9095c09d4f2382ad
|
567775619f3b97d47e7c360748912a4fd883ff52
|
refs/heads/master
| 2023-07-19T12:02:36.824294 | 2023-01-19T00:37:33 | 2023-01-19T00:37:33 | 122,896,249 | 6,064 | 1,458 |
Apache-2.0
| 2023-01-19T00:37:35 | 2018-02-26T01:33:21 |
Python
|
UTF-8
|
Python
| false | false | 3,054 |
py
|
from gluoncv.auto.tasks import ImageClassification
from gluoncv.auto.tasks import ObjectDetection
import autogluon.core as ag
import time
from nose.tools import nottest
IMAGE_CLASS_DATASET, _, IMAGE_CLASS_TEST = ImageClassification.Dataset.from_folders(
'https://autogluon.s3.amazonaws.com/datasets/shopee-iet.zip')
OBJECT_DETCTION_DATASET = ObjectDetection.Dataset.from_voc('https://autogluon.s3.amazonaws.com/datasets/tiny_motorbike.zip')
OBJECT_DETECTION_TRAIN, OBJECT_DETECTION_VAL, OBJECT_DETECTION_TEST = OBJECT_DETCTION_DATASET.random_split(val_size=0.3, test_size=0.2)
def test_image_classification():
from gluoncv.auto.tasks import ImageClassification
task = ImageClassification({'model': 'resnet18_v1', 'num_trials': 1, 'epochs': 1, 'batch_size': 8})
classifier = task.fit(IMAGE_CLASS_DATASET)
assert task.fit_summary().get('valid_acc', 0) > 0
test_result = classifier.predict(IMAGE_CLASS_TEST)
def test_image_classification_custom_net():
from gluoncv.auto.tasks import ImageClassification
from gluoncv.model_zoo import get_model
net = get_model('resnet18_v1')
task = ImageClassification({'num_trials': 1, 'epochs': 1, 'custom_net': net, 'batch_size': 8})
classifier = task.fit(IMAGE_CLASS_DATASET)
assert task.fit_summary().get('valid_acc', 0) > 0
test_result = classifier.predict(IMAGE_CLASS_TEST)
def test_object_detection_estimator():
from gluoncv.auto.tasks import ObjectDetection
task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'batch_size': 4})
detector = task.fit(OBJECT_DETECTION_TRAIN)
assert task.fit_summary().get('valid_map', 0) > 0
test_result = detector.predict(OBJECT_DETECTION_TEST)
def test_object_detection_estimator_transfer():
from gluoncv.auto.tasks import ObjectDetection
task = ObjectDetection({'num_trials': 1, 'epochs': 1, 'transfer': ag.Categorical('yolo3_darknet53_coco', 'ssd_512_resnet50_v1_voc'), 'estimator': 'ssd', 'batch_size': 4})
detector = task.fit(OBJECT_DETECTION_TRAIN)
assert task.fit_summary().get('valid_map', 0) > 0
test_result = detector.predict(OBJECT_DETECTION_TEST)
import unittest
@unittest.skip("temporarily disabled")
def test_time_out_image_classification():
time_limit = 15
from gluoncv.auto.tasks import ImageClassification
task = ImageClassification({'num_trials': 1, 'epochs': 10, 'batch_size': 8})
tic = time.time()
classifier = task.fit(IMAGE_CLASS_DATASET, time_limit=time_limit)
    # check time_limit with a little bit of overhead
assert (time.time() - tic) < time_limit + 180
@unittest.skip("temporarily disabled")
def test_time_out_detection():
time_limit = 15
from gluoncv.auto.tasks import ObjectDetection
task = ObjectDetection({'num_trials': 1, 'epochs': 5, 'time_limits': time_limit, 'batch_size': 4})
tic = time.time()
detector = task.fit(OBJECT_DETECTION_TRAIN)
    # check time_limit with a little bit of overhead
assert (time.time() - tic) < time_limit + 180
if __name__ == '__main__':
import nose
nose.runmodule()
|
[
"[email protected]"
] | |
8b283162d9edbf8dca0e7c46dc70bd9b59e8967e
|
153fb205395605f631e92950fc86ba205bd85665
|
/wagtail/wagtailcore/blocks/__init__.py
|
1b7a1740cfd2dc54b81002c49016129a51f88256
|
[
"BSD-3-Clause"
] |
permissive
|
YoungSphere/Wagtail_Young
|
8e385ab37263acf4b609bb6aa1f75d3e9035eee0
|
536b137446ef5bff464cbe8a82175ba099d4a15a
|
refs/heads/master
| 2020-04-23T07:10:45.479469 | 2019-02-16T14:09:24 | 2019-02-16T14:09:24 | 170,998,679 | 0 | 0 |
BSD-3-Clause
| 2019-02-16T14:05:12 | 2019-02-16T11:55:39 |
Python
|
UTF-8
|
Python
| false | false | 334 |
py
|
from __future__ import absolute_import
# Import block types defined in submodules into the wagtail.wagtailcore.blocks namespace
from .base import * # NOQA
from .field_block import * # NOQA
from .struct_block import * # NOQA
from .list_block import * # NOQA
from .stream_block import * # NOQA
from .static_block import * # NOQA
|
[
"[email protected]"
] | |
56d15c6806ad5e594f7fc3174603378618fc75f9
|
ef187d259d33e97c7b9ed07dfbf065cec3e41f59
|
/work/atcoder/arc/arc086/D/answers/859172_ahho.py
|
9dd224c8cbab6c1535202a94895bc8989a7d1f82
|
[] |
no_license
|
kjnh10/pcw
|
847f7295ea3174490485ffe14ce4cdea0931c032
|
8f677701bce15517fb9362cc5b596644da62dca8
|
refs/heads/master
| 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 310 |
py
|
N = int(input())
A = list(map(int,input().split()))
# max absolute value
_,mi = max((abs(v),i) for i,v in enumerate(A))
mi += 1
print(2*N-2)
for i in range(1,N+1):
if i != mi:
print(mi,i)
if A[mi-1] > 0:
for i in range(1,N):
print(i,i+1)
else:
for i in reversed(range(1,N)):
print(i+1,i)
|
[
"[email protected]"
] | |
7a88d194dc4f6647d5d15e898799151518385985
|
f131222013fd1c23bf23a9af44dbaf2cd2dfbe72
|
/python 好用库/lib/dundeemt-pysftp-ad3aefc8ec42/dundeemt-pysftp-ad3aefc8ec42/tests/test_walktree.py
|
467b7fe50b5df8c3aaf901fec09749cb730bcb4a
|
[] |
no_license
|
shortdistance/workdir
|
e0bdadcb9d6b5e61e62434d574afad36afa60ba9
|
7c4a23fdbb8ae14b67aeda47ce53be1bd24ae2d1
|
refs/heads/master
| 2021-01-19T23:21:40.885964 | 2017-04-21T12:55:45 | 2017-04-21T12:55:45 | 88,968,923 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,680 |
py
|
'''test pysftp.Connection.walktree and pysftp.walktree - uses py.test'''
from __future__ import print_function
# pylint: disable = W0142
from common import *
from mock import Mock, call
def test_walktree_cbclass():
'''test the walktree function with callbacks from a class'''
with pysftp.Connection(**SFTP_PUBLIC) as sftp:
wtcb = pysftp.WTCallbacks()
sftp.walktree('.',
fcallback=wtcb.file_cb,
dcallback=wtcb.dir_cb,
ucallback=wtcb.unk_cb)
assert './pub/build/build01/build01a/build-results.txt' in wtcb.flist
assert './readme.txt' in wtcb.flist
assert len(wtcb.flist) > 3
dlist = ['./pub', './pub/build', './pub/build/build01',
'./pub/build/build01/build01a', './pub/build/build01/build01b',
'./pub/build/build01/build01c', './pub/example', './pub/src',
'./pub/src/libs', './pub/src/media', './pub/src/tests']
assert wtcb.dlist == dlist
assert wtcb.ulist == []
def test_walktree_cbmock():
'''test the walktree function, with mocked callbacks (standalone functions)
'''
file_cb = Mock(return_value=None)
dir_cb = Mock(return_value=None)
unk_cb = Mock(return_value=None)
with pysftp.Connection(**SFTP_PUBLIC) as sftp:
sftp.walktree('.',
fcallback=file_cb,
dcallback=dir_cb,
ucallback=unk_cb)
# check calls to the file callback
file_cb.assert_called_with('./readme.txt')
thecall = call('./pub/build/build01/build01a/build-results.txt')
assert thecall in file_cb.mock_calls
assert file_cb.call_count > 3
# check calls to the directory callback
assert [call('./pub'),
call('./pub/build'),
call('./pub/build/build01'),
call('./pub/build/build01/build01a'),
call('./pub/build/build01/build01b'),
call('./pub/build/build01/build01c'),
call('./pub/example'),
call('./pub/src'),
call('./pub/src/libs'),
call('./pub/src/media'),
call('./pub/src/tests')] == dir_cb.mock_calls
# check calls to the unknown callback
assert [] == unk_cb.mock_calls
def test_walktree_no_recurse():
    '''test the walktree function with recurse=False, using mocked callbacks
    (standalone functions)'''
file_cb = Mock(return_value=None)
dir_cb = Mock(return_value=None)
unk_cb = Mock(return_value=None)
with pysftp.Connection(**SFTP_PUBLIC) as sftp:
sftp.walktree('.',
fcallback=file_cb,
dcallback=dir_cb,
ucallback=unk_cb,
recurse=False)
# check calls to the file callback
file_cb.assert_called_with('./readme.txt')
thecall = call('./readme.sym')
assert thecall in file_cb.mock_calls
assert file_cb.call_count == 2
# check calls to the directory callback
assert [call('./pub'),] == dir_cb.mock_calls
# check calls to the unknown callback
assert [] == unk_cb.mock_calls
def test_walktree_local():
'''test the capability of walktree to walk a local directory structure'''
wtcb = pysftp.WTCallbacks()
pysftp.walktree('.',
fcallback=wtcb.file_cb,
dcallback=wtcb.dir_cb,
ucallback=wtcb.unk_cb)
print(wtcb.dlist)
for dname in ['./docs', './tests']:
assert dname in wtcb.dlist
print(wtcb.ulist)
assert wtcb.ulist == []
print(wtcb.flist)
for fname in ['./release.sh', './MANIFEST.in', './tests/test_execute.py']:
assert fname in wtcb.flist
def test_walktree_local_no_recurse():
'''test the capability of walktree with recurse=False to walk a local
directory structure'''
wtcb = pysftp.WTCallbacks()
pysftp.walktree('.',
fcallback=wtcb.file_cb,
dcallback=wtcb.dir_cb,
ucallback=wtcb.unk_cb,
recurse=False)
print(wtcb.dlist)
for dname in ['./docs', './tests']:
assert dname in wtcb.dlist
print(wtcb.ulist)
assert wtcb.ulist == []
print(wtcb.flist)
for fname in ['./release.sh', './MANIFEST.in']:
assert fname in wtcb.flist
assert './tests/test_execute.py' not in wtcb.flist
def test_walktree_local_bad():
'''test pysftp.walktree on a non-existing directory'''
wtcb = pysftp.WTCallbacks()
with pytest.raises(OSError):
pysftp.walktree('/non-existing',
fcallback=wtcb.file_cb,
dcallback=wtcb.dir_cb,
ucallback=wtcb.unk_cb)
|
[
"[email protected]"
] | |
0ac8b6307780f483b0873e5bd0c05380268fc89b
|
987390ca6481ec5aa2b9e0e0e849203b6c22ce62
|
/zkeco-core/adms/memcache.py
|
a431607921f0052748ffb1b18325a987af26034a
|
[] |
no_license
|
alungboy/johan-doc
|
81b2363e7f2ad189d0623007eea66233a2e18f1c
|
7ced14577405caf6127df03007619fe9cfda3847
|
refs/heads/master
| 2020-04-03T18:01:08.531971 | 2013-08-13T04:26:42 | 2013-08-13T04:26:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 36,738 |
py
|
#!/usr/bin/env python
"""
client module for memcached (memory cache daemon)
Overview
========
See U{the MemCached homepage<http://www.danga.com/memcached>} for more about memcached.
Usage summary
=============
This should give you a feel for how this module operates::
import memcache
mc = memcache.Client(['127.0.0.1:11211'], debug=0)
mc.set("some_key", "Some value")
value = mc.get("some_key")
mc.set("another_key", 3)
mc.delete("another_key")
mc.set("key", "1") # note that the key used for incr/decr must be a string.
mc.incr("key")
mc.decr("key")
The standard way to use memcache with a database is like this::
key = derive_key(obj)
obj = mc.get(key)
if not obj:
obj = backend_api.get(...)
mc.set(obj)
# we now have obj, and future passes through this code
# will use the object from the cache.
Detailed Documentation
======================
More detailed documentation is available in the L{Client} class.
"""
import sys
import socket
import time
import types
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from zlib import compress, decompress
_supports_compress = True
except ImportError:
_supports_compress = False
# quickly define a decompress just in case we recv compressed data.
def decompress(val):
raise _Error("received compressed data but I don't support compession (import error)")
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from binascii import crc32 # zlib version is not cross-platform
serverHashFunction = crc32
__author__ = "Evan Martin <[email protected]>"
__version__ = "1.40"
__copyright__ = "Copyright (C) 2003 Danga Interactive"
__license__ = "Python"
SERVER_MAX_KEY_LENGTH = 250
# Storing values larger than 1MB requires recompiling memcached. If you do,
# this value can be changed by doing "memcache.SERVER_MAX_VALUE_LENGTH = N"
# after importing this module.
SERVER_MAX_VALUE_LENGTH = 1024*1024
class _Error(Exception):
pass
try:
# Only exists in Python 2.4+
from threading import local
except ImportError:
# TODO: add the pure-python local implementation
class local(object):
pass
class Client(local):
"""
Object representing a pool of memcache servers.
See L{memcache} for an overview.
In all cases where a key is used, the key can be either:
1. A simple hashable type (string, integer, etc.).
2. A tuple of C{(hashvalue, key)}. This is useful if you want to avoid
making this module calculate a hash value. You may prefer, for
example, to keep all of a given user's objects on the same memcache
server, so you could use the user's unique id as the hash value.
@group Setup: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog
@group Insertion: set, add, replace, set_multi
@group Retrieval: get, get_multi
@group Integers: incr, decr
@group Removal: delete, delete_multi
@sort: __init__, set_servers, forget_dead_hosts, disconnect_all, debuglog,\
set, set_multi, add, replace, get, get_multi, incr, decr, delete, delete_multi
"""
_FLAG_PICKLE = 1<<0
_FLAG_INTEGER = 1<<1
_FLAG_LONG = 1<<2
_FLAG_COMPRESSED = 1<<3
_SERVER_RETRIES = 10 # how many times to try finding a free server.
# exceptions for Client
class MemcachedKeyError(Exception):
pass
class MemcachedKeyLengthError(MemcachedKeyError):
pass
class MemcachedKeyCharacterError(MemcachedKeyError):
pass
class MemcachedStringEncodingError(Exception):
pass
def __init__(self, servers, debug=0, pickleProtocol=0,
pickler=pickle.Pickler, unpickler=pickle.Unpickler,
pload=None, pid=None):
"""
Create a new Client object with the given list of servers.
@param servers: C{servers} is passed to L{set_servers}.
@param debug: whether to display error messages when a server can't be
contacted.
@param pickleProtocol: number to mandate protocol used by (c)Pickle.
@param pickler: optional override of default Pickler to allow subclassing.
@param unpickler: optional override of default Unpickler to allow subclassing.
@param pload: optional persistent_load function to call on pickle loading.
Useful for cPickle since subclassing isn't allowed.
@param pid: optional persistent_id function to call on pickle storing.
Useful for cPickle since subclassing isn't allowed.
"""
local.__init__(self)
self.set_servers(servers)
self.debug = debug
self.stats = {}
# Allow users to modify pickling/unpickling behavior
self.pickleProtocol = pickleProtocol
self.pickler = pickler
self.unpickler = unpickler
self.persistent_load = pload
self.persistent_id = pid
def set_servers(self, servers):
"""
Set the pool of servers used by this client.
@param servers: an array of servers.
Servers can be passed in two forms:
1. Strings of the form C{"host:port"}, which implies a default weight of 1.
2. Tuples of the form C{("host:port", weight)}, where C{weight} is
an integer weight value.
"""
self.servers = [_Host(s, self.debuglog) for s in servers]
self._init_buckets()
def get_stats(self):
'''Get statistics from each of the servers.
@return: A list of tuples ( server_identifier, stats_dictionary ).
The dictionary contains a number of name/value pairs specifying
the name of the status field and the string value associated with
it. The values are not converted from strings.
'''
data = []
for s in self.servers:
if not s.connect(): continue
name = '%s:%s (%s)' % ( s.ip, s.port, s.weight )
s.send_cmd('stats')
serverData = {}
data.append(( name, serverData ))
readline = s.readline
while 1:
line = readline()
if not line or line.strip() == 'END': break
stats = line.split(' ', 2)
serverData[stats[1]] = stats[2]
return(data)
def flush_all(self):
'Expire all data currently in the memcache servers.'
for s in self.servers:
if not s.connect(): continue
s.send_cmd('flush_all')
s.expect("OK")
def debuglog(self, str):
if self.debug:
sys.stderr.write("MemCached: %s\n" % str)
def _statlog(self, func):
if not self.stats.has_key(func):
self.stats[func] = 1
else:
self.stats[func] += 1
def forget_dead_hosts(self):
"""
Reset every host in the pool to an "alive" state.
"""
for s in self.servers:
s.dead_until = 0
def _init_buckets(self):
self.buckets = []
for server in self.servers:
for i in range(server.weight):
self.buckets.append(server)
def _get_server(self, key):
if type(key) == types.TupleType:
serverhash, key = key
else:
serverhash = serverHashFunction(key)
for i in range(Client._SERVER_RETRIES):
server = self.buckets[serverhash % len(self.buckets)]
if server.connect():
#print "(using server %s)" % server,
return server, key
serverhash = serverHashFunction(str(serverhash) + str(i))
return None, None
def disconnect_all(self):
for s in self.servers:
s.close_socket()
def delete_multi(self, keys, seconds=0, key_prefix=''):
'''
Delete multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
>>> mc.delete_multi(['key1', 'key2'])
1
>>> mc.get_multi(['key1', 'key2']) == {}
1
This method is recommended over iterated regular L{delete}s as it reduces total latency, since
your app doesn't have to wait for each round-trip of L{delete} before sending
the next one.
@param keys: An iterable of keys to clear
@param seconds: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
@param key_prefix: Optional string to prepend to each key when sending to memcache.
See docs for L{get_multi} and L{set_multi}.
@return: 1 if no failure in communication with any memcacheds.
@rtype: int
'''
self._statlog('delete_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
rc = 1
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
            if seconds != None:
for key in server_keys[server]: # These are mangled keys
write("delete %s %d\r\n" % (key, seconds))
else:
for key in server_keys[server]: # These are mangled keys
write("delete %s\r\n" % key)
try:
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
rc = 0
server.mark_dead(msg[1])
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
notstored = [] # original keys.
for server, keys in server_keys.iteritems():
try:
for key in keys:
server.expect("DELETED")
except socket.error, msg:
server.mark_dead(msg)
rc = 0
return rc
def delete(self, key, time=0):
'''Deletes a key from the memcache.
@return: Nonzero on success.
        @param time: number of seconds any subsequent set / update commands should fail. Defaults to 0 for no delay.
@rtype: int
'''
check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog('delete')
if time != None:
cmd = "delete %s %d" % (key, time)
else:
cmd = "delete %s" % key
try:
server.send_cmd(cmd)
server.expect("DELETED")
except socket.error, msg:
server.mark_dead(msg[1])
return 0
return 1
def incr(self, key, delta=1):
"""
Sends a command to the server to atomically increment the value for C{key} by
C{delta}, or by 1 if C{delta} is unspecified. Returns None if C{key} doesn't
exist on server, otherwise it returns the new value after incrementing.
Note that the value for C{key} must already exist in the memcache, and it
must be the string representation of an integer.
>>> mc.set("counter", "20") # returns 1, indicating success
1
>>> mc.incr("counter")
21
>>> mc.incr("counter")
22
Overflow on server is not checked. Be aware of values approaching
2**32. See L{decr}.
@param delta: Integer amount to increment by (should be zero or greater).
@return: New value after incrementing.
@rtype: int
"""
return self._incrdecr("incr", key, delta)
def decr(self, key, delta=1):
"""
Like L{incr}, but decrements. Unlike L{incr}, underflow is checked and
new values are capped at 0. If server value is 1, a decrement of 2
returns 0, not -1.
@param delta: Integer amount to decrement by (should be zero or greater).
@return: New value after decrementing.
@rtype: int
"""
return self._incrdecr("decr", key, delta)
def _incrdecr(self, cmd, key, delta):
check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
cmd = "%s %s %d" % (cmd, key, delta)
try:
server.send_cmd(cmd)
line = server.readline()
return int(line)
except socket.error, msg:
server.mark_dead(msg[1])
return None
def add(self, key, val, time = 0, min_compress_len = 0):
'''
Add new key with value.
Like L{set}, but only stores in memcache if the key doesn't already exist.
@return: Nonzero on success.
@rtype: int
'''
return self._set("add", key, val, time, min_compress_len)
def replace(self, key, val, time=0, min_compress_len=0):
'''Replace existing key with value.
Like L{set}, but only stores in memcache if the key already exists.
The opposite of L{add}.
@return: Nonzero on success.
@rtype: int
'''
return self._set("replace", key, val, time, min_compress_len)
def set(self, key, val, time=0, min_compress_len=0):
'''Unconditionally sets a key to a given value in the memcache.
The C{key} can optionally be an tuple, with the first element
being the server hash value and the second being the key.
If you want to avoid making this module calculate a hash value.
You may prefer, for example, to keep all of a given user's objects
on the same memcache server, so you could use the user's unique
id as the hash value.
@return: Nonzero on success.
@rtype: int
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input, then it is
        discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
'''
return self._set("set", key, val, time, min_compress_len)
def _map_and_prefix_keys(self, key_iterable, key_prefix):
"""Compute the mapping of server (_Host instance) -> list of keys to stuff onto that server, as well as the mapping of
prefixed key -> original key.
"""
# Check it just once ...
key_extra_len=len(key_prefix)
if key_prefix:
check_key(key_prefix)
# server (_Host) -> list of unprefixed server keys in mapping
server_keys = {}
prefixed_to_orig_key = {}
# build up a list for each server of all the keys we want.
for orig_key in key_iterable:
if type(orig_key) is types.TupleType:
# Tuple of hashvalue, key ala _get_server(). Caller is essentially telling us what server to stuff this on.
# Ensure call to _get_server gets a Tuple as well.
str_orig_key = str(orig_key[1])
server, key = self._get_server((orig_key[0], key_prefix + str_orig_key)) # Gotta pre-mangle key before hashing to a server. Returns the mangled key.
else:
str_orig_key = str(orig_key) # set_multi supports int / long keys.
server, key = self._get_server(key_prefix + str_orig_key)
# Now check to make sure key length is proper ...
check_key(str_orig_key, key_extra_len=key_extra_len)
if not server:
continue
if not server_keys.has_key(server):
server_keys[server] = []
server_keys[server].append(key)
prefixed_to_orig_key[key] = orig_key
return (server_keys, prefixed_to_orig_key)
def set_multi(self, mapping, time=0, key_prefix='', min_compress_len=0):
'''
Sets multiple keys in the memcache doing just one query.
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'})
>>> mc.get_multi(['key1', 'key2']) == {'key1' : 'val1', 'key2' : 'val2'}
1
This method is recommended over regular L{set} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{set} before sending
the next one.
@param mapping: A dict of key/value pairs to set.
@param time: Tells memcached the time which this value should expire, either
as a delta number of seconds, or an absolute unix time-since-the-epoch
value. See the memcached protocol docs section "Storage Commands"
for more info on <exptime>. We default to 0 == cache forever.
@param key_prefix: Optional string to prepend to each key when sending to memcache. Allows you to efficiently stuff these keys into a pseudo-namespace in memcache:
>>> notset_keys = mc.set_multi({'key1' : 'val1', 'key2' : 'val2'}, key_prefix='subspace_')
>>> len(notset_keys) == 0
True
>>> mc.get_multi(['subspace_key1', 'subspace_key2']) == {'subspace_key1' : 'val1', 'subspace_key2' : 'val2'}
True
        Causes keys 'subspace_key1' and 'subspace_key2' to be set. Useful in conjunction with a higher-level layer which applies namespaces to data in memcache.
In this case, the return result would be the list of notset original keys, prefix not applied.
@param min_compress_len: The threshold length to kick in auto-compression
of the value using the zlib.compress() routine. If the value being cached is
a string, then the length of the string is measured, else if the value is an
object, then the length of the pickle result is measured. If the resulting
        attempt at compression yields a larger string than the input, then it is
        discarded. For backwards compatibility, this parameter defaults to 0,
indicating don't ever try to compress.
@return: List of keys which failed to be stored [ memcache out of memory, etc. ].
@rtype: list
'''
self._statlog('set_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(mapping.iterkeys(), key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.iterkeys():
bigcmd = []
write = bigcmd.append
try:
for key in server_keys[server]: # These are mangled keys
store_info = self._val_to_store_info(mapping[prefixed_to_orig_key[key]], min_compress_len)
write("set %s %d %d %d\r\n%s\r\n" % (key, store_info[0], time, store_info[1], store_info[2]))
server.send_cmds(''.join(bigcmd))
except socket.error, msg:
server.mark_dead(msg[1])
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
notstored = [] # original keys.
for server, keys in server_keys.iteritems():
try:
for key in keys:
line = server.readline()
if line == 'STORED':
continue
else:
notstored.append(prefixed_to_orig_key[key]) #un-mangle.
except (_Error, socket.error), msg:
server.mark_dead(msg)
return notstored
def _val_to_store_info(self, val, min_compress_len):
"""
Transform val to a storable representation, returning a tuple of the flags, the length of the new value, and the new value itself.
"""
flags = 0
if isinstance(val, str):
pass
elif isinstance(val, int):
flags |= Client._FLAG_INTEGER
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
elif isinstance(val, long):
flags |= Client._FLAG_LONG
val = "%d" % val
# force no attempt to compress this silly string.
min_compress_len = 0
else:
flags |= Client._FLAG_PICKLE
file = StringIO()
pickler = self.pickler(file, protocol=self.pickleProtocol)
if self.persistent_id:
pickler.persistent_id = self.persistent_id
pickler.dump(val)
val = file.getvalue()
# silently do not store if value length exceeds maximum
if len(val) >= SERVER_MAX_VALUE_LENGTH: return(0)
lv = len(val)
# We should try to compress if min_compress_len > 0 and we could import zlib and this string is longer than our min threshold.
if min_compress_len and _supports_compress and lv > min_compress_len:
comp_val = compress(val)
#Only retain the result if the compression result is smaller than the original.
if len(comp_val) < lv:
flags |= Client._FLAG_COMPRESSED
val = comp_val
return (flags, len(val), val)
def _set(self, cmd, key, val, time, min_compress_len = 0):
check_key(key)
server, key = self._get_server(key)
if not server:
return 0
self._statlog(cmd)
store_info = self._val_to_store_info(val, min_compress_len)
if not store_info: return(0)
fullcmd = "%s %s %d %d %d\r\n%s" % (cmd, key, store_info[0], time, store_info[1], store_info[2])
try:
server.send_cmd(fullcmd)
return(server.expect("STORED") == "STORED")
except socket.error, msg:
server.mark_dead(msg[1])
return 0
def get(self, key):
'''Retrieves a key from the memcache.
@return: The value or None.
'''
check_key(key)
server, key = self._get_server(key)
if not server:
return None
self._statlog('get')
try:
server.send_cmd("get %s" % key)
rkey, flags, rlen, = self._expectvalue(server)
if not rkey:
return None
value = self._recv_value(server, flags, rlen)
server.expect("END")
except (_Error, socket.error), msg:
if type(msg) is types.TupleType:
msg = msg[1]
server.mark_dead(msg)
return None
return value
def get_multi(self, keys, key_prefix=''):
'''
Retrieves multiple keys from the memcache doing just one query.
>>> success = mc.set("foo", "bar")
>>> success = mc.set("baz", 42)
>>> mc.get_multi(["foo", "baz", "foobar"]) == {"foo": "bar", "baz": 42}
1
>>> mc.set_multi({'k1' : 1, 'k2' : 2}, key_prefix='pfx_') == []
1
This looks up keys 'pfx_k1', 'pfx_k2', ... . Returned dict will just have unprefixed keys 'k1', 'k2'.
>>> mc.get_multi(['k1', 'k2', 'nonexist'], key_prefix='pfx_') == {'k1' : 1, 'k2' : 2}
1
        get_multi [ and L{set_multi} ] can take str()-ables like ints / longs as keys too. Such as your db pri key fields.
        They're run through str() before being passed off to memcache, with or without the use of a key_prefix.
In this mode, the key_prefix could be a table name, and the key itself a db primary key number.
>>> mc.set_multi({42: 'douglass adams', 46 : 'and 2 just ahead of me'}, key_prefix='numkeys_') == []
1
>>> mc.get_multi([46, 42], key_prefix='numkeys_') == {42: 'douglass adams', 46 : 'and 2 just ahead of me'}
1
This method is recommended over regular L{get} as it lowers the number of
total packets flying around your network, reducing total latency, since
your app doesn't have to wait for each round-trip of L{get} before sending
the next one.
See also L{set_multi}.
@param keys: An array of keys.
@param key_prefix: A string to prefix each key when we communicate with memcache.
Facilitates pseudo-namespaces within memcache. Returned dictionary keys will not have this prefix.
        @return: A dictionary of key/value pairs that were available. If key_prefix was provided, the keys in the returned dictionary will not have it present.
'''
self._statlog('get_multi')
server_keys, prefixed_to_orig_key = self._map_and_prefix_keys(keys, key_prefix)
# send out all requests on each server before reading anything
dead_servers = []
for server in server_keys.iterkeys():
try:
server.send_cmd("get %s" % " ".join(server_keys[server]))
except socket.error, msg:
server.mark_dead(msg[1])
dead_servers.append(server)
# if any servers died on the way, don't expect them to respond.
for server in dead_servers:
del server_keys[server]
retvals = {}
for server in server_keys.iterkeys():
try:
line = server.readline()
while line and line != 'END':
rkey, flags, rlen = self._expectvalue(server, line)
# Bo Yang reports that this can sometimes be None
if rkey is not None:
val = self._recv_value(server, flags, rlen)
retvals[prefixed_to_orig_key[rkey]] = val # un-prefix returned key.
line = server.readline()
except (_Error, socket.error), msg:
server.mark_dead(msg)
return retvals
def _expectvalue(self, server, line=None):
if not line:
line = server.readline()
if line[:5] == 'VALUE':
resp, rkey, flags, len = line.split()
flags = int(flags)
rlen = int(len)
return (rkey, flags, rlen)
else:
return (None, None, None)
def _recv_value(self, server, flags, rlen):
rlen += 2 # include \r\n
buf = server.recv(rlen)
if len(buf) != rlen:
raise _Error("received %d bytes when expecting %d" % (len(buf), rlen))
if len(buf) == rlen:
buf = buf[:-2] # strip \r\n
if flags & Client._FLAG_COMPRESSED:
buf = decompress(buf)
if flags == 0 or flags == Client._FLAG_COMPRESSED:
# Either a bare string or a compressed string now decompressed...
val = buf
elif flags & Client._FLAG_INTEGER:
val = int(buf)
elif flags & Client._FLAG_LONG:
val = long(buf)
elif flags & Client._FLAG_PICKLE:
try:
file = StringIO(buf)
unpickler = self.unpickler(file)
if self.persistent_load:
unpickler.persistent_load = self.persistent_load
val = unpickler.load()
except Exception, e:
self.debuglog('Pickle error: %s\n' % e)
val = None
else:
self.debuglog("unknown flags on get: %x\n" % flags)
return val
class _Host:
_DEAD_RETRY = 30 # number of seconds before retrying a dead server.
def __init__(self, host, debugfunc=None):
if isinstance(host, types.TupleType):
host, self.weight = host
else:
self.weight = 1
if host.find(":") > 0:
self.ip, self.port = host.split(":")
self.port = int(self.port)
else:
self.ip, self.port = host, 11211
if not debugfunc:
debugfunc = lambda x: x
self.debuglog = debugfunc
self.deaduntil = 0
self.socket = None
self.buffer = ''
def _check_dead(self):
if self.deaduntil and self.deaduntil > time.time():
return 1
self.deaduntil = 0
return 0
def connect(self):
if self._get_socket():
return 1
return 0
def mark_dead(self, reason):
self.debuglog("MemCache: %s: %s. Marking dead." % (self, reason))
self.deaduntil = time.time() + _Host._DEAD_RETRY
self.close_socket()
def _get_socket(self):
if self._check_dead():
return None
if self.socket:
return self.socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Python 2.3-ism: s.settimeout(1)
try:
s.connect((self.ip, self.port))
except socket.error, msg:
self.mark_dead("connect: %s" % msg[1])
return None
self.socket = s
self.buffer = ''
return s
def close_socket(self):
if self.socket:
self.socket.close()
self.socket = None
def send_cmd(self, cmd):
self.socket.sendall(cmd + '\r\n')
def send_cmds(self, cmds):
""" cmds already has trailing \r\n's applied """
self.socket.sendall(cmds)
def readline(self):
buf = self.buffer
recv = self.socket.recv
while True:
index = buf.find('\r\n')
if index >= 0:
break
data = recv(4096)
if not data:
self.mark_dead('Connection closed while reading from %s'
% repr(self))
break
buf += data
if index >= 0:
self.buffer = buf[index+2:]
buf = buf[:index]
else:
self.buffer = ''
return buf
def expect(self, text):
line = self.readline()
if line != text:
self.debuglog("while expecting '%s', got unexpected response '%s'" % (text, line))
return line
def recv(self, rlen):
self_socket_recv = self.socket.recv
buf = self.buffer
while len(buf) < rlen:
foo = self_socket_recv(4096)
buf += foo
if len(foo) == 0:
raise _Error, ( 'Read %d bytes, expecting %d, '
'read returned 0 length bytes' % ( len(buf), rlen ))
self.buffer = buf[rlen:]
return buf[:rlen]
def __str__(self):
d = ''
if self.deaduntil:
d = " (dead until %d)" % self.deaduntil
return "%s:%d%s" % (self.ip, self.port, d)
def check_key(key, key_extra_len=0):
"""Checks sanity of key. Fails if:
Key length is > SERVER_MAX_KEY_LENGTH (Raises MemcachedKeyLength).
Contains control characters (Raises MemcachedKeyCharacterError).
Is not a string (Raises MemcachedStringEncodingError)
"""
if type(key) == types.TupleType: key = key[1]
if not isinstance(key, str):
raise Client.MemcachedStringEncodingError, ("Keys must be str()'s, not "
"unicode. Convert your unicode strings using "
"mystring.encode(charset)!")
if isinstance(key, basestring):
if len(key) + key_extra_len > SERVER_MAX_KEY_LENGTH:
raise Client.MemcachedKeyLengthError, ("Key length is > %s"
% SERVER_MAX_KEY_LENGTH)
for char in key:
if ord(char) < 33 or ord(char) == 127:
raise Client.MemcachedKeyCharacterError, "Control characters not allowed"
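# Illustrative sketch (not part of the original module): a tiny helper showing how
# check_key() behaves under the rules documented above. The name _check_key_demo is
# hypothetical and exists only for illustration.
def _check_key_demo():
    check_key("plain_ascii_key")                      # passes silently
    try:
        check_key("has a space")                      # ord(' ') < 33, so rejected
    except Client.MemcachedKeyCharacterError:
        pass
    try:
        check_key("x" * (SERVER_MAX_KEY_LENGTH + 1))  # over-long key, rejected
    except Client.MemcachedKeyLengthError:
        pass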
def _doctest():
import doctest, memcache
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
globs = {"mc": mc}
return doctest.testmod(memcache, globs=globs)
if __name__ == "__main__":
print "Testing docstrings..."
_doctest()
print "Running tests:"
print
#servers = ["127.0.0.1:11211", "127.0.0.1:11212"]
servers = ["127.0.0.1:11211"]
mc = Client(servers, debug=1)
def to_s(val):
if not isinstance(val, types.StringTypes):
return "%s (%s)" % (val, type(val))
return "%s" % val
def test_setget(key, val):
print "Testing set/get {'%s': %s} ..." % (to_s(key), to_s(val)),
mc.set(key, val)
newval = mc.get(key)
if newval == val:
print "OK"
return 1
else:
print "FAIL"
return 0
class FooStruct:
def __init__(self):
self.bar = "baz"
def __str__(self):
return "A FooStruct"
def __eq__(self, other):
if isinstance(other, FooStruct):
return self.bar == other.bar
return 0
test_setget("a_string", "some random string")
test_setget("an_integer", 42)
if test_setget("long", long(1<<30)):
print "Testing delete ...",
if mc.delete("long"):
print "OK"
else:
print "FAIL"
print "Testing get_multi ...",
print mc.get_multi(["a_string", "an_integer"])
print "Testing get(unknown value) ...",
print to_s(mc.get("unknown_value"))
f = FooStruct()
test_setget("foostruct", f)
print "Testing incr ...",
x = mc.incr("an_integer", 1)
if x == 43:
print "OK"
else:
print "FAIL"
print "Testing decr ...",
x = mc.decr("an_integer", 1)
if x == 42:
print "OK"
else:
print "FAIL"
# sanity tests
print "Testing sending spaces...",
try:
x = mc.set("this has spaces", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"
print "Testing sending control characters...",
try:
x = mc.set("this\x10has\x11control characters\x02", 1)
except Client.MemcachedKeyCharacterError, msg:
print "OK"
else:
print "FAIL"
print "Testing using insanely long key...",
try:
x = mc.set('a'*SERVER_MAX_KEY_LENGTH + 'aaaa', 1)
except Client.MemcachedKeyLengthError, msg:
print "OK"
else:
print "FAIL"
print "Testing sending a unicode-string key...",
try:
x = mc.set(u'keyhere', 1)
except Client.MemcachedStringEncodingError, msg:
print "OK",
else:
print "FAIL",
try:
x = mc.set((u'a'*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except:
print "FAIL",
else:
print "OK",
import pickle
s = pickle.loads('V\\u4f1a\np0\n.')
try:
x = mc.set((s*SERVER_MAX_KEY_LENGTH).encode('utf-8'), 1)
except Client.MemcachedKeyLengthError:
print "OK"
else:
print "FAIL"
print "Testing using a value larger than the memcached value limit...",
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH)
if mc.get('keyhere') == None:
print "OK",
else:
print "FAIL",
x = mc.set('keyhere', 'a'*SERVER_MAX_VALUE_LENGTH + 'aaa')
if mc.get('keyhere') == None:
print "OK"
else:
print "FAIL"
# vim: ts=4 sw=4 et :
|
[
"xiongjianhong@a5b1b082-b159-ab12-9a9e-d79e7dfc8adf"
] |
xiongjianhong@a5b1b082-b159-ab12-9a9e-d79e7dfc8adf
|
8e53fc2821f50c18518010717b0e82b25950cac2
|
89155ebee895cbd04e4eb7a9d079a820d90ffd7e
|
/viewset_modelviewset_application/viewset_modelviewset_application/settings.py
|
9861cd4ac9f55a125dc627f00b6bae60651a2efc
|
[] |
no_license
|
mahmudgithub/Rest-api-playground
|
822c0671b534fc057461703711ef980d9d31ce56
|
a452a329d60c9104afdeadde13f7493741e4914a
|
refs/heads/master
| 2023-03-31T17:23:13.605754 | 2021-04-11T14:10:31 | 2021-04-11T14:10:31 | 331,842,045 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,851 |
py
|
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '9lzw6zp^-1(=b#u!$w%7x(7_$7alx_nrvz4kd+gkl$&1q1!%m2'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
'rest_framework'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'viewset_modelviewset_application.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'viewset_modelviewset_application.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
adb170a554dd4d70c7c27dcdfa73e45fe7a48dd5
|
63304bd3fd27aca73e949579a732e183ba0c88af
|
/httprunner/client.py
|
fb3161acdfbff488d6136512fa89db89b25b4442
|
[
"MIT"
] |
permissive
|
janice1027/HttpRunner
|
330de17485654041cf2c07022c8860364d742362
|
7fa1057f1675dc73640bb90c4a22e8811153226a
|
refs/heads/master
| 2021-04-27T11:50:58.682691 | 2018-02-22T10:53:24 | 2018-02-22T10:53:24 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,481 |
py
|
import json
import re
import time
import requests
import urllib3
from httprunner import logger
from httprunner.exception import ParamsError
from requests import Request, Response
from requests.exceptions import (InvalidSchema, InvalidURL, MissingSchema,
RequestException)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
absolute_http_url_regexp = re.compile(r"^https?://", re.I)
def prepare_kwargs(method, kwargs):
if method == "POST":
# if request content-type is application/json, request data should be dumped
content_type = kwargs.get("headers", {}).get("content-type", "")
if content_type.startswith("application/json") and "data" in kwargs:
kwargs["data"] = json.dumps(kwargs["data"])
class ApiResponse(Response):
def raise_for_status(self):
if hasattr(self, 'error') and self.error:
raise self.error
Response.raise_for_status(self)
class HttpSession(requests.Session):
"""
Class for performing HTTP requests and holding (session-) cookies between requests (in order
to be able to log in and out of websites). Each request is logged so that HttpRunner can
display statistics.
This is a slightly extended version of `python-requests <http://python-requests.org>`_'s
:py:class:`requests.Session` class and mostly this class works exactly the same. However
the methods for making requests (get, post, delete, put, head, options, patch, request)
can now take a *url* argument that's only the path part of the URL, in which case
HttpSession.base_url (normally inherited from a HttpRunner class' host property) is
prepended to form the full URL.
"""
def __init__(self, base_url=None, *args, **kwargs):
super(HttpSession, self).__init__(*args, **kwargs)
self.base_url = base_url if base_url else ""
def _build_url(self, path):
""" prepend url with hostname unless it's already an absolute URL """
if absolute_http_url_regexp.match(path):
return path
elif self.base_url:
return "%s%s" % (self.base_url, path)
else:
raise ParamsError("base url missed!")
def request(self, method, url, name=None, **kwargs):
"""
Constructs and sends a :py:class:`requests.Request`.
Returns :py:class:`requests.Response` object.
:param method:
method for the new :class:`Request` object.
:param url:
URL for the new :class:`Request` object.
:param name: (optional)
Placeholder, make compatible with Locust's HttpSession
:param params: (optional)
Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional)
Dictionary or bytes to send in the body of the :class:`Request`.
:param headers: (optional)
Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional)
Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional)
Dictionary of ``'filename': file-like-objects`` for multipart encoding upload.
:param auth: (optional)
Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional)
How long to wait for the server to send data before giving up, as a float, or \
a (`connect timeout, read timeout <user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional)
Set to True by default.
:type allow_redirects: bool
:param proxies: (optional)
Dictionary mapping protocol to the URL of the proxy.
:param stream: (optional)
whether to immediately download the response content. Defaults to ``False``.
:param verify: (optional)
if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param cert: (optional)
if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
"""
# prepend url with hostname unless it's already an absolute URL
url = self._build_url(url)
logger.log_info("{method} {url}".format(method=method, url=url))
logger.log_debug("request kwargs: {kwargs}".format(kwargs=kwargs))
# store meta data that is used when reporting the request to locust's statistics
request_meta = {}
# set up pre_request hook for attaching meta data to the request object
request_meta["method"] = method
request_meta["start_time"] = time.time()
if "httpntlmauth" in kwargs:
from requests_ntlm import HttpNtlmAuth
auth_account = kwargs.pop("httpntlmauth")
kwargs["auth"] = HttpNtlmAuth(
auth_account["username"], auth_account["password"])
kwargs.setdefault("timeout", 120)
response = self._send_request_safe_mode(method, url, **kwargs)
request_meta["url"] = (response.history and response.history[0] or response)\
.request.path_url
# record the consumed time
request_meta["response_time"] = int((time.time() - request_meta["start_time"]) * 1000)
# get the length of the content, but if the argument stream is set to True, we take
# the size from the content-length header, in order to not trigger fetching of the body
if kwargs.get("stream", False):
request_meta["content_size"] = int(response.headers.get("content-length") or 0)
else:
request_meta["content_size"] = len(response.content or "")
request_meta["request_headers"] = response.request.headers
request_meta["request_body"] = response.request.body
request_meta["status_code"] = response.status_code
request_meta["response_headers"] = response.headers
request_meta["response_content"] = response.content
logger.log_debug("response status_code: {}".format(response.status_code))
logger.log_debug("response headers: {}".format(response.headers))
logger.log_debug("response body: {}".format(response.text))
try:
response.raise_for_status()
except RequestException as e:
logger.log_error(u"{exception}".format(exception=str(e)))
else:
logger.log_info(
"""status_code: {}, response_time: {} ms, response_length: {} bytes"""\
.format(request_meta["status_code"], request_meta["response_time"], \
request_meta["content_size"]))
return response
def _send_request_safe_mode(self, method, url, **kwargs):
"""
Send an HTTP request, and catch any exception that might occur due to connection problems.
Safe mode has been removed from requests 1.x.
"""
try:
prepare_kwargs(method, kwargs)
return requests.Session.request(self, method, url, **kwargs)
except (MissingSchema, InvalidSchema, InvalidURL):
raise
except RequestException as ex:
resp = ApiResponse()
resp.error = ex
resp.status_code = 0 # with this status_code, content returns None
resp.request = Request(method, url).prepare()
return resp
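# Usage sketch (illustration only, not part of the original module): a path-only URL is
# resolved against base_url, and connection failures come back as an ApiResponse with
# status_code 0 thanks to _send_request_safe_mode. The base URL and path are placeholders.
if __name__ == "__main__":
    session = HttpSession(base_url="http://127.0.0.1:5000")
    resp = session.request("GET", "/api/get-token", timeout=5)
    print(resp.status_code)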
|
[
"httprunner"
] |
httprunner
|
6aa6c6c2e52debf96874619e6fbbdf59fef074ed
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/confidentialledger/azure-confidentialledger/tests/_shared/client_test_common.py
|
bede3f24bfa399b552959f905ed8251774a61e19
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 |
MIT
| 2019-08-11T21:16:01 | 2018-11-28T21:34:49 |
Python
|
UTF-8
|
Python
| false | false | 10,554 |
py
|
import hashlib
import os
import tempfile
from azure.confidentialledger import (
LedgerUserRole,
TransactionState,
)
from .constants import NETWORK_CERTIFICATE, USER_CERTIFICATE
from .testcase import ConfidentialLedgerTestCase
CONFIDENTIAL_LEDGER_URL = "https://fake-confidential-ledger.azure.com"
class ConfidentialLedgerClientTestMixin:
class BaseTest(ConfidentialLedgerTestCase):
def setUp(self):
super(ConfidentialLedgerClientTestMixin.BaseTest, self).setUp()
self.confidential_ledger_url = self.set_value_to_scrub(
"CONFIDENTIAL_LEDGER_URL", CONFIDENTIAL_LEDGER_URL
)
with tempfile.NamedTemporaryFile(
"w", suffix=".pem", delete=False
) as tls_cert_file:
tls_cert_file.write(NETWORK_CERTIFICATE)
self.network_certificate_path = tls_cert_file.name
with tempfile.NamedTemporaryFile(
"w", suffix=".pem", delete=False
) as user_cert_file:
user_cert_file.write(
self.set_value_to_scrub(
"CONFIDENTIAL_LEDGER_USER_CERTIFICATE", USER_CERTIFICATE
)
)
self.user_certificate_path = user_cert_file.name
def tearDown(self):
os.remove(self.user_certificate_path)
os.remove(self.network_certificate_path)
return super(ConfidentialLedgerClientTestMixin.BaseTest, self).tearDown()
def test_append_entry_flow(self):
entry_contents = "Test entry from Python SDK"
append_result = self.client.append_to_ledger(entry_contents=entry_contents)
self.assertTrue(append_result.transaction_id)
self.assertTrue(append_result.sub_ledger_id)
# Test unpacking
append_result_sub_ledger_id, append_result_transaction_id = append_result
self.client.wait_until_durable(transaction_id=append_result_transaction_id)
transaction_status = self.client.get_transaction_status(
transaction_id=append_result_transaction_id
)
self.assertIsNotNone(transaction_status)
self.assertIs(transaction_status.state, TransactionState.COMMITTED)
self.assertEqual(
transaction_status.transaction_id, append_result_transaction_id
)
receipt = self.client.get_transaction_receipt(
transaction_id=append_result_transaction_id
)
self.assertEqual(receipt.transaction_id, append_result_transaction_id)
self.assertTrue(receipt.contents)
latest_entry = self.client.get_ledger_entry()
# The transaction ids may not be equal in the unfortunate edge case where a governance
# operation occurs after the ledger append (e.g. because a node was restarted). Then,
# the latest id will be higher.
self.assertGreaterEqual(
latest_entry.transaction_id, append_result_transaction_id
)
self.assertEqual(latest_entry.contents, entry_contents)
self.assertEqual(latest_entry.sub_ledger_id, append_result_sub_ledger_id)
self.client.append_to_ledger(
"Test entry 2 from Python SDK", wait_for_commit=True
)
latest_entry = self.client.get_ledger_entry()
self.assertNotEqual(
latest_entry.transaction_id, append_result_transaction_id
)
self.assertNotEqual(latest_entry.contents, entry_contents)
self.assertEqual(latest_entry.sub_ledger_id, append_result_sub_ledger_id)
original_entry = self.client.get_ledger_entry(
transaction_id=append_result_transaction_id
)
self.assertEqual(
original_entry.transaction_id, append_result_transaction_id
)
self.assertEqual(original_entry.contents, entry_contents)
self.assertEqual(original_entry.sub_ledger_id, append_result_sub_ledger_id)
def test_append_entry_flow_with_sub_ledger_id(self):
sub_ledger_id = "132"
entry_contents = "Test sub-ledger entry from Python SDK"
append_result = self.client.append_to_ledger(
entry_contents=entry_contents, sub_ledger_id=sub_ledger_id
)
self.assertTrue(append_result.transaction_id)
self.assertEqual(append_result.sub_ledger_id, sub_ledger_id)
# Test unpacking
append_result_sub_ledger_id, append_result_transaction_id = append_result
self.client.wait_until_durable(transaction_id=append_result_transaction_id)
transaction_status = self.client.get_transaction_status(
transaction_id=append_result_transaction_id
)
self.assertIsNotNone(transaction_status)
self.assertIs(transaction_status.state, TransactionState.COMMITTED)
self.assertEqual(
transaction_status.transaction_id, append_result_transaction_id
)
receipt = self.client.get_transaction_receipt(
transaction_id=append_result_transaction_id
)
self.assertEqual(receipt.transaction_id, append_result_transaction_id)
self.assertTrue(receipt.contents)
latest_entry = self.client.get_ledger_entry(sub_ledger_id=sub_ledger_id)
# The transaction ids may not be equal in the unfortunate edge case where a governance
# operation occurs after the ledger append (e.g. because a node was restarted). Then,
# the latest id will be higher.
self.assertGreaterEqual(
latest_entry.transaction_id, append_result_transaction_id
)
self.assertEqual(latest_entry.contents, entry_contents)
self.assertEqual(latest_entry.sub_ledger_id, append_result_sub_ledger_id)
self.client.append_to_ledger(
"Test sub-ledger entry 2 from Python SDK",
sub_ledger_id=sub_ledger_id,
wait_for_commit=True,
)
latest_entry = self.client.get_ledger_entry(sub_ledger_id=sub_ledger_id)
self.assertNotEqual(
latest_entry.transaction_id, append_result_transaction_id
)
self.assertNotEqual(latest_entry.contents, entry_contents)
self.assertEqual(latest_entry.sub_ledger_id, sub_ledger_id)
original_entry = self.client.get_ledger_entry(
transaction_id=append_result_transaction_id, sub_ledger_id=sub_ledger_id
)
self.assertEqual(
original_entry.transaction_id, append_result_transaction_id
)
self.assertEqual(original_entry.contents, entry_contents)
self.assertEqual(original_entry.sub_ledger_id, append_result_sub_ledger_id)
def test_range_query(self):
modulus = 5
num_messages_sent = 201 # Should result in 2 pages.
messages = {m: [] for m in range(modulus)}
for i in range(num_messages_sent):
message = "message-{0}".format(i)
kwargs = (
{} if modulus == 0 else {"sub_ledger_id": "{0}".format(i % modulus)}
)
append_result = self.client.append_to_ledger(
entry_contents=message, **kwargs
)
messages[i % modulus].append(
(append_result.transaction_id, message, kwargs)
)
num_matched = 0
for i in range(modulus):
query_result = self.client.get_ledger_entries(
from_transaction_id=messages[i][0][0], **messages[i][0][2]
)
for index, historical_entry in enumerate(query_result):
self.assertEqual(
historical_entry.transaction_id, messages[i][index][0]
)
self.assertEqual(historical_entry.contents, messages[i][index][1])
num_matched += 1
# Due to replication delay, it's possible not all messages are matched.
self.assertGreaterEqual(num_matched, 0.9 * num_messages_sent)
def test_user_management(self):
user_id = "0" * 36 # AAD Object Ids have length 36
user = self.client.create_or_update_user(
user_id, LedgerUserRole.CONTRIBUTOR
)
self.assertEqual(user.id, user_id)
self.assertEqual(user.role, LedgerUserRole.CONTRIBUTOR)
user = self.client.get_user(user_id)
self.assertEqual(user.id, user_id)
self.assertEqual(user.role, LedgerUserRole.CONTRIBUTOR)
self.client.delete_user(user_id)
user = self.client.create_or_update_user(user_id, LedgerUserRole.READER)
self.assertEqual(user.id, user_id)
self.assertEqual(user.role, LedgerUserRole.READER)
user = self.client.get_user(user_id)
self.assertEqual(user.id, user_id)
self.assertEqual(user.role, LedgerUserRole.READER)
self.client.delete_user(user_id)
def test_verification_methods(self):
consortium = self.client.get_consortium()
self.assertEqual(len(consortium.members), 1)
for member in consortium.members:
self.assertTrue(member.certificate)
self.assertTrue(member.id)
constitution = self.client.get_constitution()
self.assertTrue(constitution.contents)
self.assertTrue(constitution.digest)
self.assertEqual(
constitution.digest.lower(),
hashlib.sha256(constitution.contents.encode()).hexdigest().lower(),
)
ledger_enclaves = self.client.get_enclave_quotes()
self.assertEqual(len(ledger_enclaves.quotes), 3)
self.assertIn(ledger_enclaves.source_node, ledger_enclaves.quotes)
for node_id, quote in ledger_enclaves.quotes.items():
self.assertEqual(node_id, quote.node_id)
self.assertTrue(quote.node_id)
self.assertTrue(quote.mrenclave)
self.assertTrue(quote.raw_quote)
self.assertTrue(quote.version)
|
[
"[email protected]"
] | |
b8fc676b24f897ed85ac06ddd8a5dcf79961772a
|
bc441bb06b8948288f110af63feda4e798f30225
|
/api_gateway_sdk/model/easy_flow/deploy_batch_pb2.py
|
d10e3afba86214003b6501929a438872c85b6ba4
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | true | 5,564 |
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: deploy_batch.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from api_gateway_sdk.model.easy_flow import deploy_target_pb2 as api__gateway__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='deploy_batch.proto',
package='easy_flow',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flow'),
serialized_pb=_b('\n\x12\x64\x65ploy_batch.proto\x12\teasy_flow\x1a\x33\x61pi_gateway_sdk/model/easy_flow/deploy_target.proto\"\xbe\x01\n\x0b\x44\x65ployBatch\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x10\n\x08\x62\x61tchNum\x18\x02 \x01(\x05\x12\x15\n\rbatchInterval\x18\x03 \x01(\x05\x12/\n\x07\x62\x61tches\x18\x04 \x03(\x0b\x32\x1e.easy_flow.DeployBatch.Batches\x12\x12\n\nfailedStop\x18\x05 \x01(\x08\x1a\x33\n\x07\x42\x61tches\x12(\n\x07targets\x18\x01 \x03(\x0b\x32\x17.easy_flow.DeployTargetBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/easy_flowb\x06proto3')
,
dependencies=[api__gateway__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2.DESCRIPTOR,])
_DEPLOYBATCH_BATCHES = _descriptor.Descriptor(
name='Batches',
full_name='easy_flow.DeployBatch.Batches',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='targets', full_name='easy_flow.DeployBatch.Batches.targets', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=226,
serialized_end=277,
)
_DEPLOYBATCH = _descriptor.Descriptor(
name='DeployBatch',
full_name='easy_flow.DeployBatch',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='easy_flow.DeployBatch.type', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchNum', full_name='easy_flow.DeployBatch.batchNum', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batchInterval', full_name='easy_flow.DeployBatch.batchInterval', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='batches', full_name='easy_flow.DeployBatch.batches', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='failedStop', full_name='easy_flow.DeployBatch.failedStop', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_DEPLOYBATCH_BATCHES, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=87,
serialized_end=277,
)
_DEPLOYBATCH_BATCHES.fields_by_name['targets'].message_type = api__gateway__sdk_dot_model_dot_easy__flow_dot_deploy__target__pb2._DEPLOYTARGET
_DEPLOYBATCH_BATCHES.containing_type = _DEPLOYBATCH
_DEPLOYBATCH.fields_by_name['batches'].message_type = _DEPLOYBATCH_BATCHES
DESCRIPTOR.message_types_by_name['DeployBatch'] = _DEPLOYBATCH
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeployBatch = _reflection.GeneratedProtocolMessageType('DeployBatch', (_message.Message,), {
'Batches' : _reflection.GeneratedProtocolMessageType('Batches', (_message.Message,), {
'DESCRIPTOR' : _DEPLOYBATCH_BATCHES,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch.Batches)
})
,
'DESCRIPTOR' : _DEPLOYBATCH,
'__module__' : 'deploy_batch_pb2'
# @@protoc_insertion_point(class_scope:easy_flow.DeployBatch)
})
_sym_db.RegisterMessage(DeployBatch)
_sym_db.RegisterMessage(DeployBatch.Batches)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
5ad76af557aa31011c1d9fd48b790dd553e175b8
|
02d1d89ed3c2a71a4f5a36f3a19f0683a0ae37e5
|
/navigation/terrain_id/terrain_training/build/milk/milk/tests/test_perceptron.py
|
6069de0370dbe07b08d521102bee703edf4c253c
|
[
"MIT"
] |
permissive
|
lforet/robomow
|
49dbb0a1c873f75e11228e24878b1e977073118b
|
eca69d000dc77681a30734b073b2383c97ccc02e
|
refs/heads/master
| 2016-09-06T10:12:14.528565 | 2015-05-19T16:20:24 | 2015-05-19T16:20:24 | 820,388 | 11 | 6 | null | null | null | null |
UTF-8
|
Python
| false | false | 728 |
py
|
import numpy as np
from milk.supervised.perceptron import perceptron_learner
from milk.supervised import _perceptron
from milksets.yeast import load
def test_raw():
np.random.seed(23)
data = np.random.random((100,10))
data[50:] += .5
labels = np.repeat((0,1), 50)
weights = np.zeros((11))
eta = 0.1
for i in xrange(20):
_perceptron.perceptron(data, labels, weights, eta)
errs = _perceptron.perceptron(data, labels, weights, eta)
assert errs < 10
def test_wrapper():
features,labels = load()
labels = (labels >= 5)
learner = perceptron_learner()
model = learner.train(features, labels)
test = map(model.apply, features)
assert np.mean(labels != test) < .35
|
[
"[email protected]"
] | |
e82043fb547aed02d1c9a63e9a349ebb3ecee747
|
9163d7b7f9301b4a334ced0a91e28348fdaa8882
|
/other_function/generate_image_without_rule.py
|
d3223d3c6cfcb34fd6acd170520b6a7976211050
|
[
"Apache-2.0"
] |
permissive
|
frankiegu/generate_click_captcha
|
2c9c551bec69d5c40e6a1354ec6f7dbef18e6447
|
7fdb2cafe4c2b5d0245b9b8c4fc9a8b8dee5f3a9
|
refs/heads/master
| 2021-03-03T14:56:30.486088 | 2019-01-03T16:03:00 | 2019-01-03T16:03:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 345 |
py
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from PIL import Image
import random
img = Image.new("RGB", (640, 480), (0, 255, 0))
w, h = img.size
for i in range(h):
for j in range(w):
a = random.randint(10, 230)
b = random.randint(10, 230)
c = random.randint(10, 230)
img.putpixel((j, i), (a, b, c))
img.show()
|
[
"[email protected]"
] | |
d3296ed2a784f2ba7881764db316ee68412339b7
|
60aac823e576a1c415bc25901e113ad0f52fbd9a
|
/abc204/b.py
|
a639b661cf948d6b00950b036601a1220c600983
|
[] |
no_license
|
nishiwakki/atCoder
|
d4209e717529ab606d0e6fceb0ce170d228b1532
|
fc5a6b667aa8c11c368fc712c5633da5ebf6bdf2
|
refs/heads/main
| 2023-06-23T08:08:12.072211 | 2021-07-24T14:01:52 | 2021-07-24T14:01:52 | 309,862,702 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 143 |
py
|
# -*- coding: UTF-8 -*-
N = int(input())
A = list(map(int, input().split()))
ans = 0
for a in A:
if a > 10:
ans += a-10
print(ans)
|
[
"[email protected]"
] | |
54da71a35a2983c730ede3d625d2a5f53256bc8f
|
50dd2a43daa8316fc11e0c176b5872738fcc5dde
|
/Learning/125_GetSpotlightNewPics/GetSpotlightNewPics.py
|
4940becf12590fdad010000d2471096aef24c6a1
|
[] |
no_license
|
FrenchBear/Python
|
58204d368e3e72071eef298ff00d06ff51bd7914
|
b41ab4b6a59ee9e145ef2cd887a5fe306973962b
|
refs/heads/master
| 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,151 |
py
|
# GetSpotlightNewPics
# Retrieves new Spotlight (microsoft wallpapers) pictures
#
# 2023-03-25 PV
# 2023-04-12 PV Logfile
# 2023-07-19 PV Added missing import
import datetime
import shutil
import os
from common_fs import get_files, extension_part
source = r'C:\Users\Pierr\AppData\Local\Packages\Microsoft.Windows.ContentDeliveryManager_cw5n1h2txyewy\LocalState\Assets'
dest = r'C:\Users\Pierr\OneDrive\PicturesODMisc\Papiers peints\Spotlight'
logfile = r'C:\temp\GetSpotlightNewPics-' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S.log')
doit = True
dest_files = set(file.lower() for file in get_files(dest) if extension_part(file.lower()) == '.jpg')
print("GetSpotlightNewPics started")
with open(logfile, 'w') as log:
copied = 0
for filebase in get_files(source):
file = filebase.lower()+'.jpg'
if file not in dest_files:
print('Add', file)
log.write(f'Add {file}\n')
copied += 1
if doit:
shutil.copyfile(os.path.join(source, filebase), os.path.join(dest, file))
print('Copied:', copied)
log.write(f'Copied: {copied}\n')
|
[
"[email protected]"
] | |
a340f7261fc62eeabc63f2815bac12c4125010b6
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/FuwuScoresGetRequest.py
|
74b17bad19a9821410b0b86c51d54b0c0426e9c7
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 356 |
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class FuwuScoresGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.current_page = None
self.date = None
self.page_size = None
def getapiname(self):
return 'taobao.fuwu.scores.get'
|
[
"[email protected]"
] | |
997ca3426e4c754a39df1f9a351c36b3da37e50a
|
64bdedbe9ede8c21f8daef2234faf248e8bcad2f
|
/flask_app/flask_blog/main/routes.py
|
5940ba8d9dd8fefe1821b71dae8384a32317b247
|
[
"MIT"
] |
permissive
|
Ziang-Lu/Flask-Blog
|
c02b5f6501af2d7f55350e337b5eed6a7f3d528b
|
aa0aa4d019de47e122cded8d4ff637d1b6edc410
|
refs/heads/master
| 2023-08-05T00:59:35.440152 | 2023-07-15T07:08:54 | 2023-07-15T07:08:54 | 203,568,155 | 1 | 0 |
MIT
| 2023-07-25T17:50:37 | 2019-08-21T11:15:05 |
Python
|
UTF-8
|
Python
| false | false | 2,022 |
py
|
# -*- coding: utf-8 -*-
"""
Flask main-related routes module.
"""
from datetime import datetime
import requests
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import current_user
from ..utils import POST_SERVICE, get_iter_pages
# Create a main-related blueprint
main_bp = Blueprint(name='main', import_name=__name__)
@main_bp.route('/')
@main_bp.route('/home')
def home():
"""
Home page.
:return:
"""
page = request.args.get('page', type=int, default=1)
request_url = f'{POST_SERVICE}/posts?page={page}&per_page=5'
username = request.args.get('user')
if username:
# Try to fetch all the posts by all the users that this user follows as
# well as this user himself
if not current_user.is_authenticated:
flash('Please log in first.', category='danger')
return redirect(url_for('auth.login'))
elif current_user.username != username:
flash(
'You can only view your own followed posts.', category='danger'
)
return redirect(url_for('main.home', user=current_user.username))
request_url += f'&user={username}'
r = requests.get(request_url)
paginated_data = r.json()
posts_data = paginated_data['data']['posts']
# Convert the datetime strings back to objects
for post in posts_data:
post['date_posted'] = datetime.fromisoformat(post['date_posted'])
pages = paginated_data['pagination_meta']['pages']
context = {
'p': {
'items': posts_data,
'page': page,
'pages': pages,
'total': paginated_data['pagination_meta']['total'],
'iter_pages': get_iter_pages(pages, page)
}
}
return render_template('home.html', **context)
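# Illustrative sketch (not part of the original module): the response shape home()
# expects from the post service, inferred from the fields accessed above. Only
# 'date_posted' is converted in code; everything else is passed to the template as-is.
# Concrete values are made up.
_EXAMPLE_POST_SERVICE_RESPONSE = {
    'data': {
        'posts': [
            {'title': 'A sample post', 'date_posted': '2019-08-21T11:15:05'}
        ]
    },
    'pagination_meta': {'pages': 1, 'total': 1}
}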
@main_bp.route('/about')
def about():
"""
About page.
:return:
"""
context = {
'title': 'About'
}
return render_template('about.html', **context)
|
[
"[email protected]"
] | |
d34a9292cb308aac1c26003f0a06be2a49244505
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02953/s447346234.py
|
650a3b88da697c512c29fd85204e7c24463e8dae
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 236 |
py
|
N=int(input())
H=list(map(int,input().split()))
if N>1:
for i in range(N-1):
if H[i]<=H[i+1]-1:
H[i+1]-=1
if H[i]>H[i+1]:
print('No')
exit()
print('Yes')
else:
print('Yes')
|
[
"[email protected]"
] | |
43c481740de68c095956f67ca9dab167d5cca6b9
|
2cf560477807e9f3e869474defda47f2638347b4
|
/glitter_news/urls.py
|
35d0dcf2342ae5c739e2fabf9d84b2241c1e3c0d
|
[] |
no_license
|
axsapronov/django-glitter-news
|
37035d45fd1edbf619659b9451184500ab2fce33
|
000d548bafa8c777a3721611ba4620173713b87d
|
refs/heads/master
| 2021-06-06T10:06:39.859177 | 2016-04-13T11:50:18 | 2016-04-13T11:50:18 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 693 |
py
|
# -*- coding: utf-8 -*-
from django.conf.urls import url
from . import views, feeds
urlpatterns = [
url(
r'^$',
views.PostListView.as_view(),
name='list'
),
url(
r'^category/(?P<slug>[-\w]+)/$',
views.PostListCategoryView.as_view(),
name='post-list-category'
),
url(
r'^(?P<year>\d{4})/(?P<month>\d{2})/(?P<day>\d{2})/(?P<slug>[-\w]+)/$',
views.PostDetailView.as_view(),
name='post-detail'
),
url(
r'^feed/$',
feeds.NewsFeed(),
name='feed'
),
url(
r'^feed/(?P<slug>[-\w]+)/$',
feeds.NewsCategoryFeed(),
name='category-feed'
),
]
|
[
"[email protected]"
] | |
2047311125959d36fc25fbb7f1224110b20fae54
|
52107637d2687db30f168ba15ffd1e1e534f4cb4
|
/mirdata/datasets/tonas.py
|
ab1246b7ec924539ae8aa594a9b692e98d1cc01d
|
[
"BSD-3-Clause"
] |
permissive
|
mir-dataset-loaders/mirdata
|
9be10e0201b08abf51fc72338ccaaacc8216145b
|
496eb4a9120aa16ff6963792f0c8b738a0c3f310
|
refs/heads/master
| 2023-05-07T13:15:16.517429 | 2023-03-27T13:54:37 | 2023-03-27T13:54:37 | 170,765,267 | 297 | 65 |
BSD-3-Clause
| 2023-08-05T22:48:48 | 2019-02-14T22:11:33 |
Python
|
UTF-8
|
Python
| false | false | 12,719 |
py
|
"""
TONAS Loader
.. admonition:: Dataset Info
:class: dropdown
This dataset contains a music collection of 72 sung excerpts representative of three a cappella singing styles
(Deblas, and two variants of Martinete). It has been developed within the COFLA research project context.
The distribution is as follows:
1. 16 Deblas
2. 36 Martinete 1
3. 20 Martinete 2
This collection was built in the context of a study on similarity and style classification of flamenco a cappella
singing styles (Tonas) by the flamenco expert Dr. Joaquin Mora, Universidad de Sevilla.
We refer to (Mora et al. 2010) for a comprehensive description of the considered styles and their musical
characteristics. All 72 excerpts are monophonic, their average duration is 30 seconds and there is enough
variability for a proper evaluation of our methods, including a variety of singers, recording conditions,
presence of percussion, clapping, background voices and noise. We also provide manual melodic transcriptions,
generated by the COFLA team and Cristina López Gómez.
The annotations are represented by specifying the value (in this case, Notes and F0) at the related timestamps.
TONAS' note and F0 annotations also have "Energy" information, which refers to the average energy value through
all the frames in which a note or a F0 value is comprised.
Using this dataset:
TONAS dataset can be obtained upon request. Please refer to this link: https://zenodo.org/record/1290722 to
request access and follow the indications of the .download() method for a proper storing and organization
of the TONAS dataset.
Citing this dataset:
When TONAS is used for academic research, we would highly appreciate if scientific publications of works partly
based on the TONAS dataset quote the following publication:
- Music material: Mora, J., Gomez, F., Gomez, E., Escobar-Borrego, F.J., Diaz-Banez, J.M. (2010). Melodic
Characterization and Similarity in A Cappella Flamenco Cantes. 11th International Society for Music Information
Retrieval Conference (ISMIR 2010).
- Transcriptions: Gomez, E., Bonada, J. (in Press). Towards Computer-Assisted Flamenco Transcription: An
Experimental Comparison of Automatic Transcription Algorithms As Applied to A Cappella Singing.
Computer Music Journal.
"""
import csv
import logging
import os
from typing import TextIO, Tuple, Optional
from deprecated.sphinx import deprecated
import librosa
import numpy as np
from smart_open import open
from mirdata import annotations, jams_utils, core, io
BIBTEX = """
Music material:
@inproceedings{tonas_music,
author = {Mora, Joaquin and Gómez, Francisco and Gómez, Emilia
and Borrego, Francisco Javier and Díaz-Báñez, José},
year = {2010},
month = {01},
pages = {351-356},
title = {Characterization and Similarity in A Cappella Flamenco Cantes.}
}
Transcriptions:
@inproceedings{tonas_annotations,
author = {E. {Gómez} and J. {Bonada}},
journal = {Computer Music Journal},
title = {Towards Computer-Assisted Flamenco Transcription: An Experimental
Comparison of Automatic Transcription Algorithms as Applied to A
Cappella Singing},
year = {2013},
volume = {37},
number = {2},
pages = {73-90},
doi = {10.1162/COMJ_a_00180}}
"""
INDEXES = {
"default": "1.0",
"test": "1.0",
"1.0": core.Index(filename="tonas_index_1.0.json"),
}
DOWNLOAD_INFO = """
PLEASE READ CAREFULLY ALL THE INFORMATION SO YOU DON'T MISS ANY STEP:
Unfortunately, the TONAS dataset is not available to be shared openly. However,
you can request access to the dataset in the following link, providing a brief
explanation of what your are going to use the dataset for:
==> https://zenodo.org/record/1290722
Then, unzip the dataset, change the dataset name to: "tonas" (with lowercase),
and locate it to {}. If you unzip it into a different path, please remember to set the
right data_home when initializing the dataset.
"""
LICENSE_INFO = """
The TONAS dataset is offered free of charge for internal non-commercial use only. You can not redistribute it nor
modify it. Dataset by COFLA team. Copyright © 2012 COFLA project, Universidad de Sevilla. Distribution rights granted
to Music Technology Group, Universitat Pompeu Fabra. All Rights Reserved.
"""
class Track(core.Track):
"""TONAS track class
Args:
track_id (str): track id of the track
data_home (str): Local path where the dataset is stored.
If `None`, looks for the data in the default directory, `~/mir_datasets/TONAS`
Attributes:
f0_path (str): local path where f0 melody annotation file is stored
notes_path(str): local path where notation annotation file is stored
audio_path(str): local path where audio file is stored
track_id (str): track id
singer (str): performing singer (cantaor)
title (str): title of the track song
tuning_frequency (float): tuning frequency of the symbolic notation
Cached Properties:
f0_automatic (F0Data): automatically extracted f0
f0_corrected (F0Data): manually corrected f0 annotations
notes (NoteData): annotated notes
"""
def __init__(self, track_id, data_home, dataset_name, index, metadata):
super().__init__(track_id, data_home, dataset_name, index, metadata)
self.f0_path = self.get_path("f0")
self.notes_path = self.get_path("notes")
self.audio_path = self.get_path("audio")
@property
def style(self):
return self._track_metadata.get("style")
@property
def singer(self):
return self._track_metadata.get("singer")
@property
def title(self):
return self._track_metadata.get("title")
@property
def tuning_frequency(self):
return _load_tuning_frequency(self.notes_path)
@property
def audio(self) -> Tuple[np.ndarray, float]:
"""The track's audio
Returns:
* np.ndarray - audio signal
* float - sample rate
"""
return load_audio(self.audio_path)
@core.cached_property
def f0_automatic(self) -> Optional[annotations.F0Data]:
return load_f0(self.f0_path, False)
@core.cached_property
def f0_corrected(self) -> Optional[annotations.F0Data]:
return load_f0(self.f0_path, True)
@property
def f0(self):
logging.warning(
"Track.f0 is deprecated as of 0.3.4 and will be removed in a future version. Use"
" Track.f0_automatic or Track.f0_corrected"
)
return self.f0_corrected
@core.cached_property
def notes(self) -> Optional[annotations.NoteData]:
return load_notes(self.notes_path)
def to_jams(self):
"""Get the track's data in jams format
Returns:
jams.JAMS: the track's data in jams format
"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
f0_data=[(self.f0, "pitch_contour")],
note_data=[(self.notes, "note_hz")],
metadata=self._track_metadata,
)
def load_audio(fhandle: str) -> Tuple[np.ndarray, float]:
"""Load a TONAS audio file.
Args:
fhandle (str): path to an audio file
Returns:
* np.ndarray - the mono audio signal
* float - The sample rate of the audio file
"""
return librosa.load(fhandle, sr=44100, mono=True)
# no decorator because of https://github.com/mir-dataset-loaders/mirdata/issues/503
def load_f0(fpath: str, corrected: bool) -> Optional[annotations.F0Data]:
"""Load TONAS f0 annotations
Args:
fpath (str): path pointing to f0 annotation file
corrected (bool): if True, loads manually corrected frequency values
otherwise, loads automatically extracted frequency values
Returns:
F0Data: predominant f0 melody
"""
times = []
freqs = []
freqs_corr = []
energies = []
with open(fpath, "r") as fhandle:
reader = np.genfromtxt(fhandle)
for line in reader:
times.append(float(line[0]))
freqs.append(float(line[2]))
freqs_corr.append(float(line[3]))
energies.append(float(line[1]))
freq_array = np.array(freqs_corr if corrected else freqs, dtype="float")
energy_array = np.array(energies, dtype="float")
voicing_array = (freq_array > 0).astype("float")
return annotations.F0Data(
np.array(times, dtype="float"),
"s",
freq_array,
"hz",
voicing_array,
"binary",
energy_array,
"energy",
)
@io.coerce_to_string_io
def load_notes(fhandle: TextIO) -> Optional[annotations.NoteData]:
"""Load TONAS note data from the annotation files
Args:
fhandle (str or file-like): path or file-like object pointing to a notes annotation file
Returns:
NoteData: note annotations
"""
intervals = []
pitches = []
energy = []
reader = csv.reader(fhandle, delimiter=",")
tuning = next(reader)[0]
for line in reader:
intervals.append([line[0], float(line[0]) + float(line[1])])
# Convert midi value to frequency
note_hz = _midi_to_hz(float(line[2]), float(tuning))
pitches.append(note_hz)
energy.append(float(line[3]))
note_data = annotations.NoteData(
np.array(intervals, dtype="float"),
"s",
np.array(pitches, dtype="float"),
"hz",
np.array(energy, dtype="float"),
"energy",
)
return note_data
@io.coerce_to_string_io
def _load_tuning_frequency(fhandle: TextIO) -> float:
"""Load tuning frequency of the track with re
Args:
fhandle (str or file-like): path or file-like object pointing to a notes annotation file
Returns:
tuning_frequency (float): returns new tuning frequency considering the deviation
"""
# Compute tuning frequency
cents_deviation = float(next(csv.reader(fhandle, delimiter=","))[0])
tuning_frequency = 440 * (
2 ** (cents_deviation / 1200)
) # Frequency of A (common value is 440Hz)
return tuning_frequency
def _midi_to_hz(midi_note, tuning_deviation):
"""Function to convert MIDI to Hz with certain tuning freq
Args:
midi_note (float): note represented in midi value
tuning_deviation (float): deviation in cents with respect to 440Hz
Returns:
(float): note in Hz considering the new tuning frequency
"""
tuning_frequency = 440 * (
2 ** (tuning_deviation / 1200)
) # Frequency of A (common value is 440Hz)
return (tuning_frequency / 32) * (2 ** ((midi_note - 9) / 12))
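# Worked example (illustration only, not part of the original loader): with zero cents
# deviation the helper reproduces standard equal temperament, so MIDI 69 (A4) gives
# 440 Hz and MIDI 60 (C4) gives roughly 261.63 Hz. The function name is hypothetical.
def _midi_to_hz_example():
    assert abs(_midi_to_hz(69, 0) - 440.0) < 1e-6
    assert abs(_midi_to_hz(60, 0) - 261.6256) < 1e-3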
@core.docstring_inherit(core.Dataset)
class Dataset(core.Dataset):
"""
The TONAS dataset
"""
def __init__(self, data_home=None, version="default"):
super().__init__(
data_home,
version,
name="tonas",
track_class=Track,
bibtex=BIBTEX,
indexes=INDEXES,
download_info=DOWNLOAD_INFO,
license_info=LICENSE_INFO,
)
@core.cached_property
def _metadata(self):
metadata_path = os.path.join(self.data_home, "TONAS-Metadata.txt")
metadata = {}
try:
with open(metadata_path, "r", errors="ignore") as f:
reader = csv.reader(
(x.replace("\0", "") for x in f), delimiter="\t"
) # Fix wrong byte
for line in reader:
if line: # Do not consider empty lines
index = line[0].replace(".wav", "")
metadata[index] = {
"style": line[1],
"title": line[2],
"singer": line[3],
}
except FileNotFoundError:
raise FileNotFoundError("Metadata not found. Did you run .download()?")
return metadata
@deprecated(reason="Use mirdata.datasets.tonas.load_audio", version="0.3.4")
def load_audio(self, *args, **kwargs):
return load_audio(*args, **kwargs)
@deprecated(reason="Use mirdata.datasets.tonas.load_f0", version="0.3.4")
def load_f0(self, *args, **kwargs):
return load_f0(*args, **kwargs)
@deprecated(reason="Use mirdata.datasets.tonas.load_notes", version="0.3.4")
def load_notes(self, *args, **kwargs):
return load_notes(*args, **kwargs)
|
[
"[email protected]"
] | |
1fbeed6d1f4cf47293abb9b8829b7839b6fd8f97
|
7d8a4d58fc4c5a73ce8c85e513253a86d6290d3b
|
/plugin.video.fen/resources/lib/modules/player.py
|
249b06a8a63cab8e5773dd11789c1c58150c8a88
|
[] |
no_license
|
bopopescu/icon
|
cda26d4463d264b7e2080da51f29d84cc48dfb81
|
e385a6225dd11b7fea5a11215d655cf5006bb018
|
refs/heads/master
| 2022-01-12T19:00:04.951604 | 2019-07-10T05:35:44 | 2019-07-10T05:35:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,191 |
py
|
# -*- coding: utf-8 -*-
import xbmc, xbmcplugin, xbmcgui
import sys
import urllib
import json
from urlparse import parse_qsl
from resources.lib.modules.indicators_bookmarks import detect_bookmark, erase_bookmark
from resources.lib.modules.nav_utils import hide_busy_dialog, close_all_dialog
from resources.lib.modules.utils import sec2time
import settings
# from resources.lib.modules.utils import logger
__handle__ = int(sys.argv[1])
window = xbmcgui.Window(10000)
class FenPlayer(xbmc.Player):
def __init__ (self):
xbmc.Player.__init__(self)
self.set_resume = settings.set_resume()
self.set_watched = settings.set_watched()
self.autoplay_nextep = settings.autoplay_next_episode()
self.nextep_threshold = settings.nextep_threshold()
self.nextep_info = None
self.delete_nextep_playcount = True
def run(self, url=None):
params = dict(parse_qsl(sys.argv[2].replace('?','')))
rootname = params.get('rootname', '')
try:
if rootname == 'nill':
url = urllib.unquote(params.get("url"))
self.play(url)
return
url = url if url else params.get("url") if 'url' in params else None
url = urllib.unquote(url)
if not url: return
self.meta = json.loads(window.getProperty('fen_media_meta'))
rootname = self.meta['rootname'] if 'rootname' in self.meta else ''
bookmark = self.bookmarkChoice()
if bookmark == -1: return
self.meta.update({'url': url, 'bookmark': bookmark})
listitem = xbmcgui.ListItem(path=url)
try:
listitem.setProperty('StartPercent', str(self.meta.get('bookmark')))
listitem.setArt({'poster': self.meta.get('poster'), 'fanart': self.meta.get('fanart'),
'banner': self.meta.get('banner'), 'clearlogo': self.meta.get('clearlogo'),
'landscape': self.meta.get('landscape')})
if self.meta['vid_type'] == 'movie':
listitem.setInfo(
'video', {'mediatype': 'movie', 'trailer': str(self.meta['trailer']),
'title': self.meta['title'], 'size': '0', 'duration': self.meta['duration'],
'plot': self.meta['plot'], 'rating': self.meta['rating'], 'premiered': self.meta['premiered'],
'studio': self.meta['studio'],'year': self.meta['year'], 'genre': self.meta['genre'],
'tagline': self.meta['tagline'], 'code': self.meta['imdb_id'], 'imdbnumber': self.meta['imdb_id'],
'director': self.meta['director'], 'writer': self.meta['writer'], 'votes': self.meta['votes']})
elif self.meta['vid_type'] == 'episode':
listitem.setInfo(
'video', {'mediatype': 'episode', 'trailer': str(self.meta['trailer']), 'title': self.meta['ep_name'],
'tvshowtitle': self.meta['title'], 'size': '0', 'plot': self.meta['plot'], 'year': self.meta['year'],
'premiered': self.meta['premiered'], 'genre': self.meta['genre'], 'season': int(self.meta['season']),
'episode': int(self.meta['episode']), 'duration': str(self.meta['duration']), 'rating': self.meta['rating']})
except: pass
try:
self.play(url, listitem)
except:
xbmcplugin.setResolvedUrl(__handle__, True, listitem)
self.monitor()
except: return
def bookmarkChoice(self):
season = self.meta.get('season', '')
episode = self.meta.get('episode', '')
if season == 0: season = ''
if episode == 0: episode = ''
bookmark = 0
try: resume_point, curr_time = detect_bookmark(self.meta['vid_type'], self.meta['media_id'], season, episode)
except: resume_point = 0
if resume_point > 0:
percent = resume_point
raw_time = float(curr_time)
time = sec2time(raw_time, n_msec=0)
bookmark = self.getResumeStatus(time, percent, bookmark, self.meta.get('from_library', None))
if bookmark == 0: erase_bookmark(self.meta['vid_type'], self.meta['media_id'], season, episode)
return bookmark
def getResumeStatus(self, time, percent, bookmark, from_library):
if settings.auto_resume(): return percent
dialog = xbmcgui.Dialog()
xbmc.sleep(600)
choice = dialog.contextmenu(['Resume from [B]%s[/B]' % time, 'Start from Beginning'])
return percent if choice == 0 else bookmark if choice == 1 else -1
def monitor(self):
self.library_setting = 'library' if 'from_library' in self.meta else None
self.autoplay_next_episode = True if self.meta['vid_type'] == 'episode' and self.autoplay_nextep else False
while not self.isPlayingVideo():
xbmc.sleep(100)
close_all_dialog()
while self.isPlayingVideo():
try:
self.total_time = self.getTotalTime()
self.curr_time = self.getTime()
xbmc.sleep(100)
if self.autoplay_next_episode:
current_point = round(float(self.curr_time/self.total_time*100),1)
if current_point >= self.nextep_threshold:
if not self.nextep_info:
self.nextEpPrep()
else: pass
except: pass
self.mediaWatchedMarker()
def mediaWatchedMarker(self):
try:
if self.delete_nextep_playcount: window.clearProperty('current_autoplay_next_number')
resume_point = round(float(self.curr_time/self.total_time*100),1)
from_search = 'true'
xbmc.sleep(3000)
if self.set_resume < resume_point < self.set_watched:
from resources.lib.modules.indicators_bookmarks import set_bookmark
set_bookmark(self.meta['vid_type'], self.meta['media_id'], self.curr_time, self.total_time, self.meta.get('season', ''), self.meta.get('episode', ''), from_search)
elif resume_point > self.set_watched:
if self.meta['vid_type'] == 'movie':
from resources.lib.modules.indicators_bookmarks import mark_movie_as_watched_unwatched
watched_function = mark_movie_as_watched_unwatched
watched_params = {"mode": "mark_movie_as_watched_unwatched", "action": 'mark_as_watched',
"media_id": self.meta['media_id'], "title": self.meta['title'], "year": self.meta['year'],
"from_search": from_search}
else:
from resources.lib.modules.indicators_bookmarks import mark_episode_as_watched_unwatched
watched_function = mark_episode_as_watched_unwatched
watched_params = {"mode": "mark_episode_as_watched_unwatched", "action": "mark_as_watched",
"season": self.meta['season'], "episode": self.meta['episode'], "media_id": self.meta['media_id'],
"title": self.meta['title'], "year": self.meta['year'], "imdb_id": self.meta['imdb_id'],
"tvdb_id": self.meta["tvdb_id"], "from_search": from_search}
watched_function(watched_params)
else: pass
except: pass
return
def nextEpPrep(self):
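        # Asks 'Are you still watching?' once the autoplay counter reaches the configured check
        # threshold, then fetches and queues the next episode unless playback was cancelled.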
auto_nextep_limit_reached = False
autoplay_next_check_threshold = settings.autoplay_next_check_threshold()
try: current_number = int(window.getProperty('current_autoplay_next_number'))
except: current_number = 1
if autoplay_next_check_threshold != 0:
if current_number == autoplay_next_check_threshold:
auto_nextep_limit_reached = True
continue_playing = xbmcgui.Dialog().yesno('Fen Next Episode', '[B]Are you still watching %s?[/B]' % self.meta['title'], '', '', 'Not Watching', 'Still Watching', 10000)
if not continue_playing == 1:
from resources.lib.modules.nav_utils import notification
notification('Fen Next Episode Cancelled', 6000)
self.nextep_info = {'pass': True}
if not self.nextep_info:
from resources.lib.modules.next_episode import nextep_playback_info, nextep_play
self.nextep_info = nextep_playback_info(self.meta['tmdb_id'], int(self.meta['season']), int(self.meta['episode']), self.library_setting)
if not self.nextep_info.get('pass', False):
if not auto_nextep_limit_reached: self.delete_nextep_playcount = False
window.setProperty('current_autoplay_next_number', str(current_number+1))
nextep_play(self.nextep_info)
# def onAVStarted(self):
# close_all_dialog()
# def onPlayBackStarted(self):
# close_all_dialog()
def playAudioAlbum(self, t_files=None, name=None, from_seperate=False):
import os
import xbmcaddon
from resources.lib.modules.utils import clean_file_name, batch_replace, to_utf8
from resources.lib.modules.nav_utils import setView
__addon_id__ = 'plugin.video.fen'
__addon__ = xbmcaddon.Addon(id=__addon_id__)
__handle__ = int(sys.argv[1])
addon_dir = xbmc.translatePath(__addon__.getAddonInfo('path'))
icon_directory = settings.get_theme()
default_furk_icon = os.path.join(icon_directory, 'furk.png')
formats = ('.3gp', ''), ('.aac', ''), ('.flac', ''), ('.m4a', ''), ('.mp3', ''), \
('.ogg', ''), ('.raw', ''), ('.wav', ''), ('.wma', ''), ('.webm', ''), ('.ra', ''), ('.rm', '')
params = dict(parse_qsl(sys.argv[2].replace('?','')))
furk_files_list = []
playlist = xbmc.PlayList(xbmc.PLAYLIST_MUSIC)
playlist.clear()
if from_seperate: t_files = [i for i in t_files if clean_file_name(i['path']) == params.get('item_path')]
for item in t_files:
try:
name = item['path'] if not name else name
if not 'audio' in item['ct']: continue
url = item['url_dl']
track_name = clean_file_name(batch_replace(to_utf8(item['name']), formats))
listitem = xbmcgui.ListItem(track_name)
listitem.setThumbnailImage(default_furk_icon)
listitem.setInfo(type='music',infoLabels={'title': track_name, 'size': int(item['size']), 'album': clean_file_name(batch_replace(to_utf8(name), formats)),'duration': item['length']})
listitem.setProperty('mimetype', 'audio/mpeg')
playlist.add(url, listitem)
if from_seperate: furk_files_list.append((url, listitem, False))
except: pass
self.play(playlist)
if from_seperate:
xbmcplugin.addDirectoryItems(__handle__, furk_files_list, len(furk_files_list))
setView('view.furk_files')
xbmcplugin.endOfDirectory(__handle__)
|
[
"[email protected]"
] | |
7c3dfc04897f0d4d50c778ed9925e8a9a3c4fdb4
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/data/p2DJ/New/R2/benchmark/startCirq84.py
|
a797a7c4d5d7e49cf7b5aedb3b512c541b22a87e
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,711 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=2
# total number=9
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=6
c.append(cirq.X.on(input_qubit[1])) # number=7
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=8
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=4
c.append(cirq.X.on(input_qubit[1])) # number=2
c.append(cirq.X.on(input_qubit[1])) # number=3
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
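# e.g. bitstring([1, 0, 1, 1]) -> '1011'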
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq84.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"[email protected]"
] | |
f8bf83673f352023ed8c7419fc205c8eee6cfc42
|
e9f096e564afc9f0cfabaeaac67c2ff2b1c46d24
|
/pymysql01/update.py
|
db767436d4285c1e11b745dfe591e848422f87e9
|
[] |
no_license
|
zh199609/pythonLearning
|
dcb4bfb4560fab0ac66a88934af278489abff38d
|
430c70f1892966cf8f6b01e30e3a7996e83cc7ff
|
refs/heads/master
| 2021-11-21T12:49:21.522780 | 2021-08-31T13:14:55 | 2021-08-31T13:14:55 | 238,373,672 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 469 |
py
|
# Create the connection
import pymysql
conn = pymysql.connect(host="localhost", user='root', password='root', database='books', autocommit=True)
# Get a cursor
cursor = conn.cursor()
# Execute the SQL
sql = "update t_book set title = '西游记修改01' where id = 4"
cursor.execute(sql)
print("Rows affected:", cursor.rowcount)
# Close the cursor
cursor.close()
# Close the connection
conn.close()
try:
print('try')
except Exception as e:
print(e)
finally:
print('finally')
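# A parameterized variant (illustrative sketch, not part of the original script; pymysql
# binds %s placeholders itself, which avoids building SQL strings by hand):
#     sql = "update t_book set title = %s where id = %s"
#     cursor.execute(sql, ('西游记修改01', 4))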
|
[
"[email protected]"
] | |
b8f99ce6a6829a95a5e4779296dbd76a1d416365
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-bms/huaweicloudsdkbms/v1/model/address_info.py
|
5ab5c287376294e8571e0f93ba2a0651682fba42
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 |
NOASSERTION
| 2023-06-22T14:50:48 | 2020-05-08T02:28:43 |
Python
|
UTF-8
|
Python
| false | false | 6,832 |
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class AddressInfo:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'version': 'str',
'addr': 'str',
'os_ext_ip_stype': 'str',
'os_ext_ips_ma_cmac_addr': 'str',
'os_ext_ip_sport_id': 'str'
}
attribute_map = {
'version': 'version',
'addr': 'addr',
'os_ext_ip_stype': 'OS-EXT-IPS:type',
'os_ext_ips_ma_cmac_addr': 'OS-EXT-IPS-MAC:mac_addr',
'os_ext_ip_sport_id': 'OS-EXT-IPS:port_id'
}
def __init__(self, version=None, addr=None, os_ext_ip_stype=None, os_ext_ips_ma_cmac_addr=None, os_ext_ip_sport_id=None):
"""AddressInfo
The model defined in huaweicloud sdk
        :param version: IP address version. 4: IPv4. 6: IPv6.
        :type version: str
        :param addr: IP address
        :type addr: str
        :param os_ext_ip_stype: IP address type. fixed: private IP address. floating: floating IP address.
        :type os_ext_ip_stype: str
        :param os_ext_ips_ma_cmac_addr: MAC address.
        :type os_ext_ips_ma_cmac_addr: str
        :param os_ext_ip_sport_id: Port ID of the IP address
:type os_ext_ip_sport_id: str
"""
self._version = None
self._addr = None
self._os_ext_ip_stype = None
self._os_ext_ips_ma_cmac_addr = None
self._os_ext_ip_sport_id = None
self.discriminator = None
self.version = version
self.addr = addr
if os_ext_ip_stype is not None:
self.os_ext_ip_stype = os_ext_ip_stype
if os_ext_ips_ma_cmac_addr is not None:
self.os_ext_ips_ma_cmac_addr = os_ext_ips_ma_cmac_addr
if os_ext_ip_sport_id is not None:
self.os_ext_ip_sport_id = os_ext_ip_sport_id
@property
def version(self):
"""Gets the version of this AddressInfo.
        IP address version. 4: IPv4. 6: IPv6.
:return: The version of this AddressInfo.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this AddressInfo.
        IP address version. 4: IPv4. 6: IPv6.
:param version: The version of this AddressInfo.
:type version: str
"""
self._version = version
@property
def addr(self):
"""Gets the addr of this AddressInfo.
        IP address
:return: The addr of this AddressInfo.
:rtype: str
"""
return self._addr
@addr.setter
def addr(self, addr):
"""Sets the addr of this AddressInfo.
        IP address
:param addr: The addr of this AddressInfo.
:type addr: str
"""
self._addr = addr
@property
def os_ext_ip_stype(self):
"""Gets the os_ext_ip_stype of this AddressInfo.
        IP address type. fixed: private IP address. floating: floating IP address.
:return: The os_ext_ip_stype of this AddressInfo.
:rtype: str
"""
return self._os_ext_ip_stype
@os_ext_ip_stype.setter
def os_ext_ip_stype(self, os_ext_ip_stype):
"""Sets the os_ext_ip_stype of this AddressInfo.
        IP address type. fixed: private IP address. floating: floating IP address.
:param os_ext_ip_stype: The os_ext_ip_stype of this AddressInfo.
:type os_ext_ip_stype: str
"""
self._os_ext_ip_stype = os_ext_ip_stype
@property
def os_ext_ips_ma_cmac_addr(self):
"""Gets the os_ext_ips_ma_cmac_addr of this AddressInfo.
        MAC address.
:return: The os_ext_ips_ma_cmac_addr of this AddressInfo.
:rtype: str
"""
return self._os_ext_ips_ma_cmac_addr
@os_ext_ips_ma_cmac_addr.setter
def os_ext_ips_ma_cmac_addr(self, os_ext_ips_ma_cmac_addr):
"""Sets the os_ext_ips_ma_cmac_addr of this AddressInfo.
        MAC address.
:param os_ext_ips_ma_cmac_addr: The os_ext_ips_ma_cmac_addr of this AddressInfo.
:type os_ext_ips_ma_cmac_addr: str
"""
self._os_ext_ips_ma_cmac_addr = os_ext_ips_ma_cmac_addr
@property
def os_ext_ip_sport_id(self):
"""Gets the os_ext_ip_sport_id of this AddressInfo.
        Port ID of the IP address
:return: The os_ext_ip_sport_id of this AddressInfo.
:rtype: str
"""
return self._os_ext_ip_sport_id
@os_ext_ip_sport_id.setter
def os_ext_ip_sport_id(self, os_ext_ip_sport_id):
"""Sets the os_ext_ip_sport_id of this AddressInfo.
        Port ID of the IP address
:param os_ext_ip_sport_id: The os_ext_ip_sport_id of this AddressInfo.
:type os_ext_ip_sport_id: str
"""
self._os_ext_ip_sport_id = os_ext_ip_sport_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddressInfo):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
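# Minimal usage sketch (illustrative only, not part of the original SDK module; values are made up):
#     info = AddressInfo(version="4", addr="192.168.0.5", os_ext_ip_stype="fixed")
#     info.to_dict()   # -> {'version': '4', 'addr': '192.168.0.5', 'os_ext_ip_stype': 'fixed', ...}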
|
[
"[email protected]"
] | |
f3e7038704cb777b458dce69e5130f105f8dd4d7
|
6ecff67d6103ddbd787f78c35182722b83b8a37e
|
/백준/Python/카테고리/DFS/11724(연결 요소의 개수).py
|
7a4091ce202ede0784f000483c362a969b976680
|
[] |
no_license
|
jsungmin6/Algorithm
|
9ef2339aa00921e7df756a8dff569954a008c118
|
bc1ea9de9f7ba3f1aa6616ebef8719540d72e0bf
|
refs/heads/master
| 2023-05-27T06:24:16.123307 | 2021-06-11T09:22:21 | 2021-06-11T09:22:21 | 259,299,624 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 733 |
py
|
'''
Idea: build a visited list and an adjacency-list graph, then count the connected components.
'''
from collections import deque
import sys
input = sys.stdin.readline
N,M = map(int,input().split())
visited = [0]*(N+1)
graph = [[] for i in range(N+1)]
def dfs(i):
visited[i] = 1
need_visit=deque(graph[i])
while need_visit:
node = need_visit.popleft()
if visited[node] !=0:
continue
visited[node] = 1
for k in graph[node]:
if visited[k] !=0:
continue
need_visit.append(k)
for _ in range(M):
u,v = map(int,input().split())
graph[u].append(v)
graph[v].append(u)
cnt=0
for i in range(1,N+1):
if visited[i] != 1:
dfs(i)
cnt+=1
print(cnt)
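# Illustrative run (hypothetical sample, not taken from the problem statement):
#   input : 6 5 / 1 2 / 2 5 / 5 1 / 3 4 / 4 6
#   output: 2   (components {1, 2, 5} and {3, 4, 6})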
|
[
"[email protected]"
] | |
26b2fcf42fe20f5c02c69785b561a485bae9c91f
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/PhysicsAnalysis/D3PDMaker/QcdD3PDMaker/python/JSTrackJets.py
|
f04421edf54f821f6b1056046460cf90fb8a50b8
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,137 |
py
|
# Copyright (C) 2002-2017 CERN for the benefit of the ATLAS collaboration
###############################################################
#
# JetTrackZClusterTool jobOptions file
# # PhysicsAnalysis/JetEtMissDPDModifier/share/JetTrackZClusterTool_jobOptions.py
# # https://svnweb.cern.ch/trac/atlasoff/browser/PhysicsAnalysis/D3PDMaker/SUSYD3PDMaker/trunk/share/JetTrackZClusterTool_jobOptions.py
#==============================================================
#--------------------------------------------------------------
# JetTrackZClusterTool Options
#--------------------------------------------------------------
# Import configurable for tool
from JetRecTools.JetRecToolsConf import JetTrackZClusterTool
from JetRec.JetGetters import *
from JetRec.JetRecConf import *
def createJSTrackJets ( theseq, myjetfinder, myjetdr ):
# Setup tool so that it can be used
JS_MyJetTrackZClusterTool = JetTrackZClusterTool( "JS_JetTrackZClusterTool_%s%d" % (myjetfinder,myjetdr*10) )
JS_MyJetTrackZClusterTool.TrackJetMinMulti = 2
JS_MyJetTrackZClusterTool.TrackJetMinPt = 4000 # MeV
JS_MyJetTrackZClusterTool.UseVtxSeeding = True
JS_MyJetTrackZClusterTool.DeltaZRange = 10000.0
JS_MyJetTrackZClusterTool.TrackParticleContainerName = "TrackParticleCandidate"
JS_MyJetTrackZClusterTool.VxContainerName = "VxPrimaryCandidate"
JS_MyJetTrackZClusterTool.OutputLevel = 3
#--------------------------------------------------------------
# TrackSelector Tool Options
#--------------------------------------------------------------
from InDetTrackSelectorTool.InDetTrackSelectorToolConf import InDet__InDetDetailedTrackSelectorTool
from AthenaCommon.AppMgr import ToolSvc
trackSelector = InDet__InDetDetailedTrackSelectorTool( "JS_MyDetailedTrackSelectorTool" )
ToolSvc += trackSelector
#See InDetDetailedTrackSelectorTool.h for additional options and defaults
trackSelector.pTMin = 500 # MeV
trackSelector.etaMax = 2.5
trackSelector.nHitBLayer = 0
trackSelector.nHitPix = 0
trackSelector.nHitBLayerPlusPix = 1 #0
trackSelector.nHitSct = 6 #0
trackSelector.nHitSi = 7 #7
trackSelector.nHitTrt = 0
trackSelector.IPd0Max = 1.0 #1 # d0 cut
trackSelector.IPz0Max = 1.5 # z0*sin(theta) cut
trackSelector.z0Max = 200 # z0 cut
trackSelector.fitChi2OnNdfMax = 10000 #1000 #3.5 #3
trackSelector.d0significanceMax = -1.
trackSelector.z0significanceMax = -1.
# Try to set InDet default tools to avoid strange bugs
try:
trackSelector.Extrapolator = ToolSvc.InDetExtrapolator
except:
from AthenaCommon.Logging import logging
l = logging.getLogger("TrackSelectionForJets::setupTrackSelectorTool")
l.warning("No ToolSvc.InDetExtrapolator available. Tracking might cause infinite loop")
pass
#trackSelector.OutputLevel = 3
from TrkTrackSummaryTool.AtlasTrackSummaryTool import AtlasTrackSummaryTool
atst = AtlasTrackSummaryTool()
ToolSvc += atst
trackSelector.TrackSummaryTool = atst
##
from JetSubStructure.JetSubStructureConf import JetSubStructure__CachedTrackSelectorTool
CachedTrackSelector = JetSubStructure__CachedTrackSelectorTool("JS_CachedTrackSelectorTool")
ToolSvc += CachedTrackSelector
CachedTrackSelector.TrackSelector = trackSelector
##
# Tell "JetTrackZClusterTool" to use this tool
JS_MyJetTrackZClusterTool.TrackSelector = CachedTrackSelector.TrackSelector
#--------------------------------------------------------------
# JetFinder Tool Options (Anti-Kt)
#--------------------------------------------------------------
from JetRec.JetRecConf import JetFastJetFinderTool
myfastfinder = JetFastJetFinderTool("JS_%s%dTrackJetFinder" % (myjetfinder,myjetdr*10))
if myjetfinder == 'AntiKt':
myfastfinder.Algorithm = "anti-kt"
elif myjetfinder == 'CamKt':
myfastfinder.Algorithm = "cambridge"
myfastfinder.Radius = myjetdr
myfastfinder.RecombScheme = "E"
myfastfinder.Strategy = "Best"
myfastfinder.FailIfMisconfigured = True
myfastfinder.Inclusive = True
myfastfinder.CalculateJetArea = False
myfastfinder.StoreNFlipValues = 0
ToolSvc += myfastfinder
# Tell "TrackZClusterTool" to use this tool
JS_MyJetTrackZClusterTool.JetFinder = myfastfinder
#-------------------------------------------------------------
# Jet Getter
#-------------------------------------------------------------
JS_TrackZToolList = [JS_MyJetTrackZClusterTool,
JetSignalSelectorTool('JSTZ_JetFinalPtCut',UseTransverseMomentum = True,MinimumSignal= jetFlags.finalMinEt()),
JetSorterTool('JSTZ_JetSorter',SortOrder="ByPtDown") ]
mytrackzjetgetter = make_StandardJetGetter(myjetfinder, myjetdr,'TrackZ',seq = theseq, allTools = JS_TrackZToolList)
return mytrackzjetgetter
#==============================================================
#
# End of job options file
#
###############################################################
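# Illustrative call (assumed usage, not part of the original jobOptions):
#   from AthenaCommon.AlgSequence import AlgSequence
#   topSequence = AlgSequence()
#   getter = createJSTrackJets(topSequence, 'AntiKt', 0.4)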
|
[
"[email protected]"
] | |
16173f46fa2faecd70a0cdf9cbd51a926f590924
|
17268419060d62dabb6e9b9ca70742f0a5ba1494
|
/pp/types.py
|
4dd35737400a72f4e304189cf8101cc22720358d
|
[
"MIT"
] |
permissive
|
TrendingTechnology/gdsfactory
|
a19124423b12cbbb4f35b61f33303e9a012f82e5
|
c968558dba1bae7a0421bdf49dc192068147b776
|
refs/heads/master
| 2023-02-22T03:05:16.412440 | 2021-01-24T03:38:00 | 2021-01-24T03:38:00 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 577 |
py
|
"""Common data types."""
from typing import Callable, Dict, Tuple, Union
from pp.component import Component
Layer = Tuple[int, int]
ComponentOrFunction = Union[Callable, Component]
NameToFunctionDict = Dict[str, Callable]
Number = Union[float, int]
def get_name_to_function_dict(*functions) -> Dict[str, Callable]:
"""Returns a dict with function name as key and function as value"""
return {func.__name__: func for func in functions}
__all__ = [
"Layer",
"ComponentOrFunction",
"NameToFunctionDict",
"Number",
"get_name_to_function_dict",
]
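# Minimal usage sketch (illustrative; `straight` and `bend` are hypothetical factories):
#     def straight(): ...
#     def bend(): ...
#     get_name_to_function_dict(straight, bend)  # -> {"straight": straight, "bend": bend}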
|
[
"[email protected]"
] | |
c5f27f2348c5d7b812eaf52d13e8b3cb56d2b862
|
fcc7dd9d9c7f22808f907759cec6a339c5a1421c
|
/my_workstation/my-v2/core.PrePostInitMeta.py
|
3edabd35eb190368ee17c81725ad1b667c000f50
|
[] |
no_license
|
EmbraceLife/fastai_treasures
|
3ae792771af3510848c7bb19003b04cff8001e1e
|
4230be915e70a7e5a22f2f7e5137cca7045754fd
|
refs/heads/master
| 2022-10-30T07:46:46.397037 | 2020-09-09T04:09:14 | 2020-09-09T04:09:14 | 173,718,178 | 21 | 6 | null | 2022-10-06T05:32:28 | 2019-03-04T09:52:12 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 2,111 |
py
|
from local.test import *
from local.imports import *
from local.notebook.showdoc import show_doc
from local.core import *
# check the official source
show_doc(PrePostInitMeta, title_level=3)
class PrePostInitMeta(type):
"""
"A metaclass that calls optional `__pre_init__` and `__post_init__` methods"
    Why do we need `PrePostInitMeta`?
    - it not only runs `__init__`, but also
    - automatically runs `__pre_init__` and `__post_init__`
    How do we use `PrePostInitMeta`?
    - create a class with `PrePostInitMeta` as its metaclass
    - you can add `__pre_init__` and `__post_init__` alongside `__init__`
    - the program runs them in the order `__pre_init__`, `__init__`, `__post_init__`
    - if any of them is missing, a no-op `_pass()` method runs instead
    How is `PrePostInitMeta` built?
    - lay out the logic flow first
    - set a pdb breakpoint at the first line of `__new__`
    - `__new__` runs before `t = _T()` is executed
    - it prepares all the additional methods of `x` (i.e. `_T`)
"""
def __new__(cls, name, bases, dct):
# pdb break here to run the hidden codes
x = super().__new__(cls, name, bases, dct)
def _pass(self, *args,**kwargs): pass
for o in ('__init__', '__pre_init__', '__post_init__'):
if not hasattr(x,o): setattr(x,o,_pass)
old_init = x.__init__
@functools.wraps(old_init)
def _init(self,*args,**kwargs):
self.__pre_init__()
old_init(self, *args,**kwargs)
self.__post_init__()
setattr(x, '__init__', _init)
return x
# simple but standard example
class _T(metaclass=PrePostInitMeta):
def __pre_init__(self): self.a = 0; assert self.a==0
def __init__(self): self.a += 1; assert self.a==1
def __post_init__(self): self.a += 1; assert self.a==2
t = _T() #pdb
t.a
# what would happen when __post_init__ is missing
class _T(metaclass=PrePostInitMeta):
def __pre_init__(self): self.a = 0; assert self.a==0
def __init__(self): self.a += 1; assert self.a==1
# def __post_init__(self): self.a += 1; assert self.a==2
t = _T()#pdb
t.a
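# what would happen when __init__ itself is missing (illustrative addition, not in the original notes)
class _T(metaclass=PrePostInitMeta):
    def __pre_init__(self): self.a = 0; assert self.a==0
    def __post_init__(self): self.a += 1; assert self.a==1
t = _T()
t.a # 1, because the metaclass substitutes the no-op _pass for the missing __init__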
|
[
"[email protected]"
] | |
0ead5a6450132dc13eb9ea466a731aed48251aa1
|
0880faa6ef7f30da63a74739203b0f9d7d4fe10e
|
/wesbanco/items.py
|
b03a2b1076a10cef81ffb6f9dcaca01f7f808313
|
[] |
no_license
|
wesleybowen/wesbanco
|
11c930d9facd5f2ee64b31316192796635529eb9
|
75e29924f2b57cd4b8da84a04e74fe10a5547942
|
refs/heads/main
| 2023-03-26T14:01:32.826370 | 2021-03-25T08:56:09 | 2021-03-25T08:56:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 135 |
py
|
import scrapy
class WesbancoItem(scrapy.Item):
title = scrapy.Field()
description = scrapy.Field()
date = scrapy.Field()
|
[
"[email protected]"
] | |
e9ba4883701edcd0c2ced8d8e106f5dd022f75af
|
d659d8f29ef2f01ac1fc8c292e8a836ba36bfd16
|
/src/transformers/models/blenderbot_small/modeling_blenderbot_small.py
|
d3e80f02224a7672245d9fcd858c436a9ace5d96
|
[
"Apache-2.0"
] |
permissive
|
monuminu/transformers
|
dc0fa4730ca6e37c2053f8b327c37bb322dd99cc
|
82c6e8fc0efaece02eb6e809b7b03df909ff6c11
|
refs/heads/master
| 2023-07-15T13:13:23.525788 | 2021-08-03T09:41:42 | 2021-08-03T09:41:42 | 369,849,849 | 1 | 0 |
Apache-2.0
| 2021-05-29T05:26:17 | 2021-05-22T16:00:10 |
Python
|
UTF-8
|
Python
| false | false | 72,328 |
py
|
# coding=utf-8
# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" PyTorch BlenderbotSmall model. """
import copy
import math
import random
from typing import Optional, Tuple
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss
from ...activations import ACT2FN
from ...file_utils import (
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_outputs import (
BaseModelOutput,
BaseModelOutputWithPastAndCrossAttentions,
CausalLMOutputWithCrossAttentions,
Seq2SeqLMOutput,
Seq2SeqModelOutput,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_blenderbot_small import BlenderbotSmallConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "BlenderbotSmallConfig"
_TOKENIZER_FOR_DOC = "BlenderbotSmallTokenizer"
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST = [
"facebook/blenderbot_small-90M",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
]
# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
"""
Shift input ids one token to the right.
"""
shifted_input_ids = input_ids.new_zeros(input_ids.shape)
shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
shifted_input_ids[:, 0] = decoder_start_token_id
assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined."
# replace possible -100 values in labels by `pad_token_id`
shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
return shifted_input_ids
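# Illustrative example (not part of the original file): with pad_token_id=0 and
# decoder_start_token_id=1, input_ids [[5, 6, 7]] is shifted to [[1, 5, 6]].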
# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0):
"""
Make causal mask used for bi-directional self-attention.
"""
bsz, tgt_len = input_ids_shape
mask = torch.full((tgt_len, tgt_len), float("-inf"))
mask_cond = torch.arange(mask.size(-1))
mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
mask = mask.to(dtype)
if past_key_values_length > 0:
mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1)
return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)
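# Illustrative example (not part of the original file): for tgt_len=3 and no past key/values the mask is
# [[0., -inf, -inf],
#  [0.,   0., -inf],
#  [0.,   0.,   0.]], expanded to shape (bsz, 1, 3, 3).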
# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min)
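# Illustrative example (not part of the original file): an attention_mask row [1, 1, 0] expands to
# [0.0, 0.0, torch.finfo(dtype).min], so the padded position is suppressed in the attention scores.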
# Copied from transformers.models.blenderbot.modeling_blenderbot.BlenderbotLearnedPositionalEmbedding with Blenderbot->BlenderbotSmall
class BlenderbotSmallLearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
"""
def __init__(self, num_embeddings: int, embedding_dim: int):
super().__init__(num_embeddings, embedding_dim)
def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
"""`input_ids_shape` is expected to be [bsz x seqlen]."""
bsz, seq_len = input_ids_shape[:2]
positions = torch.arange(
past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
)
return super().forward(positions)
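# Illustrative example (not part of the original file): with past_key_values_length=2 and seq_len=3,
# the positions looked up are [2, 3, 4].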
# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BlenderbotSmall
class BlenderbotSmallAttention(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
assert (
self.head_dim * num_heads == self.embed_dim
), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
self.scaling = self.head_dim ** -0.5
self.is_decoder = is_decoder
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
bsz, tgt_len, embed_dim = hidden_states.size()
# get query proj
query_states = self.q_proj(hidden_states) * self.scaling
# get key, value proj
if is_cross_attention and past_key_value is not None:
# reuse k,v, cross_attentions
key_states = past_key_value[0]
value_states = past_key_value[1]
elif is_cross_attention:
# cross_attentions
key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
elif past_key_value is not None:
# reuse k, v, self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
key_states = torch.cat([past_key_value[0], key_states], dim=2)
value_states = torch.cat([past_key_value[1], value_states], dim=2)
else:
# self_attention
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
if self.is_decoder:
# if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
# Further calls to cross_attention layer can then reuse all cross-attention
# key/value_states (first "if" case)
# if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
# all previous decoder key/value_states. Further calls to uni-directional self-attention
# can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
# if encoder bi-directional self-attention `past_key_value` is always `None`
past_key_value = (key_states, value_states)
proj_shape = (bsz * self.num_heads, -1, self.head_dim)
query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
key_states = key_states.view(*proj_shape)
value_states = value_states.view(*proj_shape)
src_len = key_states.size(1)
attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
if attention_mask is not None:
if attention_mask.size() != (bsz, 1, tgt_len, src_len):
raise ValueError(
f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
)
attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
attn_weights = F.softmax(attn_weights, dim=-1)
if layer_head_mask is not None:
if layer_head_mask.size() != (self.num_heads,):
raise ValueError(
f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
)
attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
if output_attentions:
# this operation is a bit awkward, but it's required to
# make sure that attn_weights keeps its gradient.
# In order to do so, attn_weights have to be reshaped
# twice and have to be reused in the following
attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
else:
attn_weights_reshaped = None
attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training)
attn_output = torch.bmm(attn_probs, value_states)
if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
)
attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
attn_output = attn_output.transpose(1, 2)
attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights_reshaped, past_key_value
# Copied from transformers.models.bart.modeling_bart.BartEncoderLayer with Bart->BlenderbotSmall
class BlenderbotSmallEncoderLayer(nn.Module):
def __init__(self, config: BlenderbotSmallConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BlenderbotSmallAttention(
embed_dim=self.embed_dim,
num_heads=config.encoder_attention_heads,
dropout=config.attention_dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
layer_head_mask: torch.Tensor,
output_attentions: bool = False,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states, attn_weights, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if hidden_states.dtype == torch.float16 and (
torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
):
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.bart.modeling_bart.BartDecoderLayer with Bart->BlenderbotSmall
class BlenderbotSmallDecoderLayer(nn.Module):
def __init__(self, config: BlenderbotSmallConfig):
super().__init__()
self.embed_dim = config.d_model
self.self_attn = BlenderbotSmallAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.encoder_attn = BlenderbotSmallAttention(
self.embed_dim,
config.decoder_attention_heads,
dropout=config.attention_dropout,
is_decoder=True,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
layer_head_mask: Optional[torch.Tensor] = None,
cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
past_key_value: Optional[Tuple[torch.Tensor]] = None,
output_attentions: Optional[bool] = False,
use_cache: Optional[bool] = True,
):
"""
Args:
hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)`
attention_mask (:obj:`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size
`(encoder_attention_heads,)`.
cross_attn_layer_head_mask (:obj:`torch.FloatTensor`): mask for cross-attention heads in a given layer of
size `(decoder_attention_heads,)`.
past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
# add present self-attn cache to positions 1,2 of present_key_value tuple
hidden_states, self_attn_weights, present_key_value = self.self_attn(
hidden_states=hidden_states,
past_key_value=self_attn_past_key_value,
attention_mask=attention_mask,
layer_head_mask=layer_head_mask,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
# Cross-Attention Block
cross_attn_present_key_value = None
cross_attn_weights = None
if encoder_hidden_states is not None:
residual = hidden_states
# cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
hidden_states=hidden_states,
key_value_states=encoder_hidden_states,
attention_mask=encoder_attention_mask,
layer_head_mask=cross_attn_layer_head_mask,
past_key_value=cross_attn_past_key_value,
output_attentions=output_attentions,
)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# add cross-attn to positions 3,4 of present_key_value tuple
present_key_value = present_key_value + cross_attn_present_key_value
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
if use_cache:
outputs += (present_key_value,)
return outputs
class BlenderbotSmallPreTrainedModel(PreTrainedModel):
config_class = BlenderbotSmallConfig
base_model_prefix = "model"
def _init_weights(self, module):
std = self.config.init_std
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=std)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=std)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
@property
def dummy_inputs(self):
pad_token = self.config.pad_token_id
input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
dummy_inputs = {
"attention_mask": input_ids.ne(pad_token),
"input_ids": input_ids,
"decoder_input_ids": input_ids,
}
return dummy_inputs
BLENDERBOT_SMALL_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic
methods the library implements for all its model (such as downloading or saving, resizing the input embeddings,
pruning heads etc.)
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to
general usage and behavior.
Parameters:
config (:class:`~transformers.BlenderbotSmallConfig`):
Model configuration class with all the parameters of the model. Initializing with a config file does not
load the weights associated with the model, only the configuration. Check out the
:meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BLENDERBOT_SMALL_GENERATION_EXAMPLE = r"""
Conversation example::
>>> from transformers import BlenderbotSmallTokenizer, BlenderbotSmallForConditionalGeneration
>>> mname = 'facebook/blenderbot_small-90M'
>>> model = BlenderbotSmallForConditionalGeneration.from_pretrained(mname)
>>> tokenizer = BlenderbotSmallTokenizer.from_pretrained(mname)
>>> UTTERANCE = "My friends are cool but they eat too many carbs."
>>> print("Human: ", UTTERANCE)
>>> inputs = tokenizer([UTTERANCE], return_tensors='pt')
>>> inputs.pop("token_type_ids")
>>> reply_ids = model.generate(**inputs)
>>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
what kind of carbs do they eat? i don't know much about carbs.
>>> REPLY = "I'm not sure"
>>> print("Human: ", REPLY)
>>> NEXT_UTTERANCE = (
... "My friends are cool but they eat too many carbs.</s> "
... "<s>what kind of carbs do they eat? i don't know much about carbs.</s> "
... "<s>I'm not sure."
... )
>>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors='pt')
>>> inputs.pop("token_type_ids")
>>> next_reply_ids = model.generate(**inputs)
>>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
"""
BLENDERBOT_SMALL_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
it.
Indices can be obtained using :class:`~transformers.BlenderbotSmallTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.BlenderbotSmallTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
details.
`What are decoder input IDs? <../glossary.html#decoder-input-ids>`__
BlenderbotSmall uses the :obj:`bos_token_id` as the starting token for :obj:`decoder_input_ids` generation.
If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
:obj:`past_key_values`).
decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will
also be used by default.
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in ``[0,
1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`):
Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`:
:obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`,
`optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded
representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds`
have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert
:obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds`
takes the value of :obj:`inputs_embeds`.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
class BlenderbotSmallEncoder(BlenderbotSmallPreTrainedModel):
"""
Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
:class:`BlenderbotSmallEncoderLayer`.
Args:
config: BlenderbotSmallConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.encoder_layerdrop
embed_dim = config.d_model
self.padding_idx = config.pad_token_id
self.max_source_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
config.max_position_embeddings,
embed_dim,
)
self.layers = nn.ModuleList([BlenderbotSmallEncoderLayer(config) for _ in range(config.encoder_layers)])
self.layernorm_embedding = nn.LayerNorm(embed_dim)
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BlenderbotSmallTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
embed_pos = self.embed_positions(input_shape)
hidden_states = inputs_embeds + embed_pos
hidden_states = self.layernorm_embedding(hidden_states)
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# expand attention_mask
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype)
encoder_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# check if head_mask has a correct number of layers specified if desired
if head_mask is not None:
assert head_mask.size()[0] == (
len(self.layers)
), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, encoder_layer in enumerate(self.layers):
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop): # skip the layer
layer_outputs = (None, None)
else:
if getattr(self.config, "gradient_checkpointing", False) and self.training:
def create_custom_forward(module):
def custom_forward(*inputs):
return module(*inputs, output_attentions)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(encoder_layer),
hidden_states,
attention_mask,
(head_mask[idx] if head_mask is not None else None),
)
else:
layer_outputs = encoder_layer(
hidden_states,
attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
output_attentions=output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
if output_hidden_states:
encoder_states = encoder_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
return BaseModelOutput(
last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
)
class BlenderbotSmallDecoder(BlenderbotSmallPreTrainedModel):
"""
Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a
:class:`BlenderbotSmallDecoderLayer`
Args:
config: BlenderbotSmallConfig
embed_tokens (torch.nn.Embedding): output embedding
"""
def __init__(self, config: BlenderbotSmallConfig, embed_tokens: Optional[nn.Embedding] = None):
super().__init__(config)
self.dropout = config.dropout
self.layerdrop = config.decoder_layerdrop
self.padding_idx = config.pad_token_id
self.max_target_positions = config.max_position_embeddings
self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
if embed_tokens is not None:
self.embed_tokens = embed_tokens
else:
self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
self.embed_positions = BlenderbotSmallLearnedPositionalEmbedding(
config.max_position_embeddings,
config.d_model,
)
self.layers = nn.ModuleList([BlenderbotSmallDecoderLayer(config) for _ in range(config.decoder_layers)])
self.layernorm_embedding = nn.LayerNorm(config.d_model)
self.init_weights()
def get_input_embeddings(self):
return self.embed_tokens
def set_input_embeddings(self, value):
self.embed_tokens = value
# Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length):
# create causal mask
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
combined_attention_mask = None
if input_shape[-1] > 1:
combined_attention_mask = _make_causal_mask(
input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length
).to(self.device)
if attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
combined_attention_mask = (
expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
)
return combined_attention_mask
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BlenderbotSmallTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
of the decoder.
encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
cross-attention on hidden heads. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last
:obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
sequence_length)`.
inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
into associated vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# retrieve input_ids and inputs_embeds
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
elif input_ids is not None:
input_shape = input_ids.size()
input_ids = input_ids.view(-1, input_shape[-1])
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
# past_key_values_length
past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
attention_mask = self._prepare_decoder_attention_mask(
attention_mask, input_shape, inputs_embeds, past_key_values_length
)
# expand encoder attention mask
if encoder_hidden_states is not None and encoder_attention_mask is not None:
# [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1])
# embed positions
positions = self.embed_positions(input_shape, past_key_values_length)
# BlenderbotSmall applies layer norm on hidden_states
inputs_embeds = self.layernorm_embedding(inputs_embeds)
hidden_states = inputs_embeds + positions
hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
# decoder layers
all_hidden_states = () if output_hidden_states else None
all_self_attns = () if output_attentions else None
all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
next_decoder_cache = () if use_cache else None
# check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
if attn_mask is not None:
assert attn_mask.size()[0] == (
len(self.layers)
), f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
for idx, decoder_layer in enumerate(self.layers):
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
if output_hidden_states:
all_hidden_states += (hidden_states,)
dropout_probability = random.uniform(0, 1)
if self.training and (dropout_probability < self.layerdrop):
continue
past_key_value = past_key_values[idx] if past_key_values is not None else None
if getattr(self.config, "gradient_checkpointing", False) and self.training:
if use_cache:
logger.warning(
"`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting "
"`use_cache=False`..."
)
use_cache = False
def create_custom_forward(module):
def custom_forward(*inputs):
# None for past_key_value
return module(*inputs, output_attentions, use_cache)
return custom_forward
layer_outputs = torch.utils.checkpoint.checkpoint(
create_custom_forward(decoder_layer),
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask[idx] if head_mask is not None else None,
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
None,
)
else:
layer_outputs = decoder_layer(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
layer_head_mask=(head_mask[idx] if head_mask is not None else None),
cross_attn_layer_head_mask=(
cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
),
past_key_value=past_key_value,
output_attentions=output_attentions,
use_cache=use_cache,
)
hidden_states = layer_outputs[0]
if use_cache:
next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
if output_attentions:
all_self_attns += (layer_outputs[1],)
if encoder_hidden_states is not None:
all_cross_attentions += (layer_outputs[2],)
# add hidden states from the last decoder layer
if output_hidden_states:
all_hidden_states += (hidden_states,)
next_cache = next_decoder_cache if use_cache else None
if not return_dict:
return tuple(
v
for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
if v is not None
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=next_cache,
hidden_states=all_hidden_states,
attentions=all_self_attns,
cross_attentions=all_cross_attentions,
)
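# Illustrative sketch (not in the original source; `decoder`, `ids` and `next_id` are
# placeholders): with use_cache=True the returned past_key_values let the next call feed
# only the newest token, e.g.
# >>> out = decoder(input_ids=ids, use_cache=True, return_dict=True)
# >>> step = decoder(input_ids=next_id, past_key_values=out.past_key_values,
# ...                use_cache=True, return_dict=True)
# where next_id has shape (batch_size, 1) and the cached keys/values supply the prefix.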
@add_start_docstrings(
"The bare BlenderbotSmall Model outputting raw hidden-states without any specific head on top.",
BLENDERBOT_SMALL_START_DOCSTRING,
)
class BlenderbotSmallModel(BlenderbotSmallPreTrainedModel):
def __init__(self, config: BlenderbotSmallConfig):
super().__init__(config)
padding_idx, vocab_size = config.pad_token_id, config.vocab_size
self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
self.encoder = BlenderbotSmallEncoder(config, self.shared)
self.decoder = BlenderbotSmallDecoder(config, self.shared)
self.init_weights()
def get_input_embeddings(self):
return self.shared
def set_input_embeddings(self, value):
self.shared = value
self.encoder.embed_tokens = self.shared
self.decoder.embed_tokens = self.shared
def get_encoder(self):
return self.encoder
def get_decoder(self):
return self.decoder
@add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Returns:
Example::
>>> from transformers import BlenderbotSmallTokenizer, BlenderbotSmallModel
>>> model = BlenderbotSmallModel.from_pretrained("facebook/blenderbot_small-90M")
>>> tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
>>> input_ids = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt").input_ids # Batch size 1
>>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
>>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
>>> last_hidden_states = outputs.last_hidden_state
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
use_cache = use_cache if use_cache is not None else self.config.use_cache
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if encoder_outputs is None:
encoder_outputs = self.encoder(
input_ids=input_ids,
attention_mask=attention_mask,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
# If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
encoder_outputs = BaseModelOutput(
last_hidden_state=encoder_outputs[0],
hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
)
# decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
decoder_outputs = self.decoder(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
encoder_hidden_states=encoder_outputs[0],
encoder_attention_mask=attention_mask,
head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
if not return_dict:
return decoder_outputs + encoder_outputs
return Seq2SeqModelOutput(
last_hidden_state=decoder_outputs.last_hidden_state,
past_key_values=decoder_outputs.past_key_values,
decoder_hidden_states=decoder_outputs.hidden_states,
decoder_attentions=decoder_outputs.attentions,
cross_attentions=decoder_outputs.cross_attentions,
encoder_last_hidden_state=encoder_outputs.last_hidden_state,
encoder_hidden_states=encoder_outputs.hidden_states,
encoder_attentions=encoder_outputs.attentions,
)
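# Illustrative sketch (not in the original source): because forward() above accepts a
# precomputed `encoder_outputs`, the encoder only has to run once when decoding step by
# step, e.g.
# >>> enc = model.get_encoder()(input_ids=input_ids, attention_mask=attention_mask, return_dict=True)
# >>> out = model(encoder_outputs=enc, attention_mask=attention_mask,
# ...             decoder_input_ids=decoder_input_ids, return_dict=True)
# Here `model`, `input_ids`, `attention_mask` and `decoder_input_ids` follow the
# docstring example in forward() above.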
@add_start_docstrings(
"The BlenderbotSmall Model with a language modeling head. Can be used for summarization.",
BLENDERBOT_SMALL_START_DOCSTRING,
)
class BlenderbotSmallForConditionalGeneration(BlenderbotSmallPreTrainedModel):
base_model_prefix = "model"
_keys_to_ignore_on_load_missing = [
r"final_logits_bias",
r"encoder\.version",
r"decoder\.version",
r"lm_head\.weight",
]
def __init__(self, config: BlenderbotSmallConfig):
super().__init__(config)
self.model = BlenderbotSmallModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
self.init_weights()
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
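    # Added note (illustrative, not in the original source): resize_token_embeddings()
    # above keeps the logits bias in step with the vocabulary, e.g.
    # >>> model.resize_token_embeddings(model.config.vocab_size + 2)
    # >>> model.final_logits_bias.shape   # now (1, vocab_size + 2); the new entries are zeros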
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
@add_start_docstrings_to_model_forward(BLENDERBOT_SMALL_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
@add_end_docstrings(BLENDERBOT_SMALL_GENERATION_EXAMPLE)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns:
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
encoder_outputs=encoder_outputs,
decoder_attention_mask=decoder_attention_mask,
head_mask=head_mask,
decoder_head_mask=decoder_head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return Seq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
)
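    # Illustrative fine-tuning sketch (not in the original source; the checkpoint name is
    # taken from the docstring examples earlier in this file):
    # >>> tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    # >>> model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
    # >>> batch = tokenizer(["hello, how are you?"], return_tensors="pt")
    # >>> labels = tokenizer(["i am fine, thank you."], return_tensors="pt").input_ids
    # >>> loss = model(**batch, labels=labels).loss
    # Passing `labels` makes forward() derive decoder_input_ids via shift_tokens_right and
    # return the cross-entropy loss computed above.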
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
decoder_head_mask=None,
cross_attn_head_mask=None,
use_cache=None,
encoder_outputs=None,
**kwargs
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
"use_cache": use_cache, # change this to avoid caching (presumably for debugging)
}
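    # Added note (illustrative; `past` and `enc_out` are placeholders): generate() calls
    # the hook above at every step, and once `past` is set only the newest token survives
    # the slice, e.g.
    # >>> model.prepare_inputs_for_generation(torch.tensor([[2, 14, 27]]), past=past,
    # ...                                      encoder_outputs=enc_out)["decoder_input_ids"]
    # tensor([[27]])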
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],
)
return reordered_past
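# Added note on _reorder_cache above: during beam search, beam_idx records which beam each
# surviving hypothesis was expanded from, so the cached self-attention keys/values
# (layer_past[:2]) are gathered along the batch dimension with index_select, while the
# cached cross-attention states (layer_past[2:]) are left as-is because they depend only
# on the encoder output, which is identical across beams.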
# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->BlenderbotSmall
class BlenderbotSmallDecoderWrapper(BlenderbotSmallPreTrainedModel):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the :class:`~transformers.EncoderDecoderModel` framework.
"""
def __init__(self, config):
super().__init__(config)
self.decoder = BlenderbotSmallDecoder(config)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->BlenderbotSmall
class BlenderbotSmallForCausalLM(BlenderbotSmallPreTrainedModel):
def __init__(self, config):
super().__init__(config)
config = copy.deepcopy(config)
config.is_decoder = True
config.is_encoder_decoder = False
self.model = BlenderbotSmallDecoderWrapper(config)
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.init_weights()
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
@replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
cross_attn_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
):
r"""
Args:
input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
provide it.
Indices can be obtained using :class:`~transformers.BlenderbotSmallTokenizer`. See
:meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
for details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
if the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
                in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
cross_attn_head_mask (:obj:`torch.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
Mask to nullify selected heads of the cross-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
decoding.
If :obj:`past_key_values` are used, the user can optionally input only the last ``decoder_input_ids``
(those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
instead of all ``decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`.
labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
config.vocab_size]``.
use_cache (:obj:`bool`, `optional`):
If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
decoding (see :obj:`past_key_values`).
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
returned tensors for more detail.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
for more detail.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
Returns:
Example::
>>> from transformers import BlenderbotSmallTokenizer, BlenderbotSmallForCausalLM
            >>> tokenizer = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M')
            >>> model = BlenderbotSmallForCausalLM.from_pretrained('facebook/blenderbot_small-90M', add_cross_attention=False)
>>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
            >>> logits = outputs.logits
"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids=input_ids,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
head_mask=head_mask,
cross_attn_head_mask=cross_attn_head_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (logits,) + outputs[1:]
return (loss,) + output if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
return reordered_past
|
[
"[email protected]"
] | |
8be82b1ecb01d00e2c59861f5272e0287388f6c9
|
eebe1e43538bcc11a0558b58f2e6a6d22abc6a4a
|
/DGesQuad/manage.py
|
bc8d80251e82e44eba5bd6aa3df20298527c7f17
|
[] |
no_license
|
zurcx/GesQuad
|
9718843be1e24e5a11572ad90a7a0da1065f15f6
|
e1b5413ecfb740fd92e2dac8858b9e86fbb6efad
|
refs/heads/master
| 2021-01-10T20:11:22.635706 | 2013-04-25T16:02:02 | 2013-04-25T16:02:02 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 251 |
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "DGesQuad.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
[
"[email protected]"
] | |
595539f9f15b03ff08b9a21351d7fb2c6c6668e1
|
405d5ab969287f184ea589e73d61cc3be1c5d12b
|
/kinparse/kinparse.py
|
4ebc18408467834295ad57576d63765bbbb410ce
|
[
"MIT"
] |
permissive
|
CBJamo/kinparse
|
a847c594f1bbed015797bb31ea8c15cc339f9d85
|
3f4e8e9d881290cbac8e7b71886b0ed6cab0cd9b
|
refs/heads/master
| 2020-03-09T06:32:02.646762 | 2018-04-08T13:02:52 | 2018-04-08T13:02:52 | 128,641,751 | 0 | 0 | null | 2018-04-08T13:01:53 | 2018-04-08T13:01:53 | null |
UTF-8
|
Python
| false | false | 7,150 |
py
|
# MIT license
#
# Copyright (C) 2016 by XESS Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Parsers for netlist files of various formats (only KiCad, at present).
"""
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from future import standard_library
standard_library.install_aliases()
from .py_2_3 import *
from pyparsing import *
THIS_MODULE = locals()
def _parse_netlist_kicad(text):
"""
Return a pyparsing object storing the contents of a KiCad netlist.
"""
def _paren_clause(keyword, value):
"""
Create a parser for a parenthesized list with an initial keyword.
"""
lp = Literal('(').suppress()
rp = Literal(')').suppress()
return (lp + Keyword(keyword, caseless=True).suppress() + value('val') + rp
)(keyword)
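    # Added comment (illustrative): _paren_clause('tool', anystring) matches text such as
    # "(tool eeschema)" and stores "eeschema" under the 'tool' result name (and 'val').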
#++++++++++++++++++++++++++++ Parser Definition +++++++++++++++++++++++++++
# Basic elements.
word = Word(alphas)
inum = Word(nums)
fnum = Word(nums) + Optional(Literal('.') + Optional(Word(nums)))
string = ZeroOrMore(White()).suppress() + CharsNotIn('()') + ZeroOrMore(White()).suppress()
qstring = dblQuotedString() ^ sglQuotedString()
qstring.addParseAction(removeQuotes)
anystring = qstring ^ string
# Design section.
source = _paren_clause('source', Optional(anystring))
date = _paren_clause('date', Optional(anystring))
tool = _paren_clause('tool', Optional(anystring))
number = _paren_clause('number', inum)
name = _paren_clause('name', anystring)
names = _paren_clause('names', anystring)
tstamp = _paren_clause('tstamp', anystring)
tstamps = _paren_clause('tstamps', anystring)
title = _paren_clause('title', Optional(anystring))
company = _paren_clause('company', Optional(anystring))
rev = _paren_clause('rev', Optional(anystring))
value = _paren_clause('value', anystring)
comment = Group(_paren_clause('comment', number & value))
comments = Group(OneOrMore(comment))('comments')
title_block = _paren_clause('title_block', Optional(title) &
Optional(company) & Optional(rev) &
Optional(date) & Optional(source) & comments)
sheet = Group(_paren_clause('sheet', number + name + tstamps + Optional(title_block)))
sheets = Group(OneOrMore(sheet))('sheets')
design = _paren_clause('design', Optional(source) & Optional(date) &
Optional(tool) & Optional(sheets))
# Components section.
ref = _paren_clause('ref', anystring)
datasheet = _paren_clause('datasheet', anystring)
field = Group(_paren_clause('field', name & anystring('text')))
fields = _paren_clause('fields', ZeroOrMore(field))
lib = _paren_clause('lib', anystring)
part = _paren_clause('part', anystring)
footprint = _paren_clause('footprint', anystring)
libsource = _paren_clause('libsource', lib & part)
sheetpath = _paren_clause('sheetpath', names & tstamps)
comp = Group(_paren_clause('comp', ref & value & Optional(datasheet) &
Optional(fields) & Optional(libsource) & Optional(footprint) &
Optional(sheetpath) & Optional(tstamp)))
components = _paren_clause('components', ZeroOrMore(comp))
# Part library section.
description = _paren_clause('description', anystring)
docs = _paren_clause('docs', anystring)
pnum = _paren_clause('num', anystring)
ptype = _paren_clause('type', anystring)
pin = Group(_paren_clause('pin', pnum & name & ptype))
pins = _paren_clause('pins', ZeroOrMore(pin))
alias = Group(_paren_clause('alias', anystring))
aliases = _paren_clause('aliases', ZeroOrMore(alias))
fp = Group(_paren_clause('fp', anystring))
footprints = _paren_clause('footprints', ZeroOrMore(fp))
libpart = Group(_paren_clause('libpart', lib & part & Optional(
fields) & Optional(pins) & Optional(footprints) & Optional(aliases) &
Optional(description) & Optional(docs)))
libparts = _paren_clause('libparts', ZeroOrMore(libpart))
# Libraries section.
logical = _paren_clause('logical', anystring)
uri = _paren_clause('uri', anystring)
library = Group(_paren_clause('library', logical & uri))
libraries = _paren_clause('libraries', ZeroOrMore(library))
# Nets section.
code = _paren_clause('code', inum)
part_pin = _paren_clause('pin', anystring)
node = Group(_paren_clause('node', ref & part_pin))
nodes = Group(OneOrMore(node))('nodes')
net = Group(_paren_clause('net', code & name & nodes))
nets = _paren_clause('nets', ZeroOrMore(net))
# Entire netlist.
version = _paren_clause('version', word)
end_of_file = ZeroOrMore(White()) + stringEnd
parser = _paren_clause('export', version +
(design & components & Optional(libparts) & Optional(libraries) & nets
))('netlist') + end_of_file.suppress()
return parser.parseString(text)
def parse_netlist(src, tool='kicad'):
"""
Return a pyparsing object storing the contents of a netlist.
Args:
src: Either a text string, or a filename, or a file object that stores
            the netlist.
        tool: Name of the ECAD tool that produced the netlist; only 'kicad' is currently supported.
Returns:
A pyparsing object that stores the netlist contents.
Exception:
PyparsingException.
"""
try:
text = src.read()
except Exception:
try:
text = open(src,'r').read()
except Exception:
text = src
if not isinstance(text, basestring):
raise Exception("What is this shit you're handing me? [{}]\n".format(src))
try:
# Use the tool name to find the function for loading the library.
func_name = '_parse_netlist_{}'.format(tool)
parse_func = THIS_MODULE[func_name]
return parse_func(text)
except KeyError:
# OK, that didn't work so well...
logger.error('Unsupported ECAD tool library: {}'.format(tool))
raise Exception
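# Illustrative usage (the file name is a placeholder, not part of the original source):
# >>> nl = parse_netlist('my_board.net')          # a KiCad netlist file on disk
# >>> nl = parse_netlist(open('my_board.net'))    # or an already-open file object
# >>> nl = parse_netlist(netlist_text)            # or the raw netlist text itself
# Each call returns the pyparsing results object produced by _parse_netlist_kicad().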
|
[
"[email protected]"
] | |
479c637690d2609afb78de27ddf5b36e90b1d02e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03011/s370991894.py
|
b6c99e9bd6ab11dbd9f475b8b3e37ac126223374
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 62 |
py
|
time=list(map(int,input().split()))
print(sum(time)-max(time))
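# Added comment: the script reads the space-separated integers on one line and prints
# their total minus the largest value, i.e. the sum of everything except the maximum.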
|
[
"[email protected]"
] | |
695b7501286789e70267ff4ce0dcf1ccb349a120
|
6a63a3b241e161d1e69f1521077617ad86f31eab
|
/release/rllib_tests/multi_gpu_learning_tests/run.py
|
7835956daf7972e9fe0bad297fab58c3f0d175a8
|
[
"MIT",
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
jovany-wang/ray
|
47a9df67e8ea26337517d625df50eb0b8b892135
|
227aef381a605cb1ebccbba4e84b840634196a35
|
refs/heads/master
| 2023-09-03T23:53:00.050619 | 2022-08-20T21:50:52 | 2022-08-20T21:50:52 | 240,190,407 | 1 | 1 |
Apache-2.0
| 2023-03-04T08:57:04 | 2020-02-13T06:13:19 |
Python
|
UTF-8
|
Python
| false | false | 818 |
py
|
"""Multi-GPU learning tests for RLlib (torch and tf).
"""
import json
import os
from pathlib import Path
from ray.rllib.utils.test_utils import run_learning_tests_from_yaml
if __name__ == "__main__":
# Get path of this very script to look for yaml files.
abs_yaml_path = Path(__file__).parent
print("abs_yaml_path={}".format(abs_yaml_path))
yaml_files = abs_yaml_path.rglob("*.yaml")
yaml_files = sorted(
map(lambda path: str(path.absolute()), yaml_files), reverse=True
)
# Run all tests in the found yaml files.
results = run_learning_tests_from_yaml(yaml_files)
test_output_json = os.environ.get(
"TEST_OUTPUT_JSON", "/tmp/rllib_multi_gpu_learning_tests.json"
)
with open(test_output_json, "wt") as f:
json.dump(results, f)
print("Ok.")
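    # Added comment: results are written to the path named by the TEST_OUTPUT_JSON
    # environment variable, defaulting to /tmp/rllib_multi_gpu_learning_tests.json.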
|
[
"[email protected]"
] | |
7edca1e18a6672b3567128648ab71cf7a75a0200
|
e6d55aa3c68644bdfe37a9472931c01950e27609
|
/ceggelab/ce/models.py
|
4fef63b2b397da38f1d012d9759b53a3e5d2848a
|
[] |
no_license
|
KamonratNg/cegge
|
2c5597554f183479d4f18a10d9e4132a299ea591
|
64e3b3012ea96f2fd1c25f1581e83a9c193f7092
|
refs/heads/master
| 2023-01-06T18:23:46.668810 | 2020-11-11T17:56:54 | 2020-11-11T17:56:54 | 309,621,257 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 562 |
py
|
from django.db import models
# Create your models here.
class Studentrecord(models.Model):
student_name_en = models.CharField(max_length=200)
student_name_th = models.CharField(max_length=200)
student_ID = models.IntegerField(default=0)
type = (('M','Master programs'),
('D','Doctoral programs'),
('P','Postdoctorate'),
('R','Researcher'))
student_level = models.CharField(max_length=30, choices= type)
def __str__(self):
return self.student_name_en +' '+ str(self.student_ID) +' '+ self.student_level
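    # Illustrative example (values are made up): a record created as
    # Studentrecord(student_name_en="Ann", student_name_th="Ann", student_ID=640001, student_level="M")
    # is rendered by __str__ above as "Ann 640001 M".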
|
[
"[email protected]"
] | |
9f5a25ac8aac1ab659663cbe98f1f78fec020788
|
b3e42025194b81680086d097fed9aa6c84bfce9a
|
/apps/vendors/locations_urls.py
|
98387686246dcb4232cee0b5d2523610b972344e
|
[
"MIT"
] |
permissive
|
superdev999/ProvenBanking
|
95c65698d9f3a552d04edfd4fd9d4469fb43a47f
|
2153e9d737e2b235e502c848986ca35b6f310b8d
|
refs/heads/master
| 2021-01-12T05:17:31.864890 | 2017-08-17T00:11:59 | 2017-08-17T00:11:59 | 77,897,989 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 679 |
py
|
from django.conf.urls import patterns, include, url
from med_social.decorators import member_required
from vendors.views import (CreateVendorLocation,
EditVendorLocation, DeleteVendorLocation, VendorLocationList)
# namespace = locations
urlpatterns = patterns('',
url(r'^(?P<pk>\d+)/create/$', member_required(CreateVendorLocation.as_view()), name='create'),
url(r'^(?P<pk>\d+)/list/$', member_required(VendorLocationList.as_view()), name='list'),
url(r'^(?P<pk>\d+)/(?P<loc_pk>\d+)/edit/$', member_required(EditVendorLocation.as_view()), name='edit'),
url(r'^(?P<pk>\d+)/delete/$', member_required(DeleteVendorLocation.as_view()), name='delete'),
)
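# Added note (illustrative): with this module included under the "locations" namespace,
# reverse('locations:edit', kwargs={'pk': 3, 'loc_pk': 7}) resolves to ".../3/7/edit/"
# relative to wherever the URLconf is mounted.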
|
[
"[email protected]"
] | |
088511264beb5b545d0e43bf09441aa35f1c34e7
|
f845225329fa9750c838bf511fed3beb48cc86af
|
/listings/migrations/0001_initial.py
|
9ea3d0e0a6433a346296dafd185e3909e32ea71a
|
[] |
no_license
|
Fabricourt/btre_project-
|
ac8c2b84cc8b7f4f5368a204dc23b378d488b356
|
13defd495ba309ac31550d22ad7d6306638f91eb
|
refs/heads/master
| 2020-04-15T11:03:05.980170 | 2019-01-08T11:16:56 | 2019-01-08T11:16:56 | 164,611,152 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,168 |
py
|
# Generated by Django 2.1.4 on 2018-12-23 13:47
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('realtors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('address', models.CharField(max_length=200)),
('city', models.CharField(max_length=100)),
('state', models.CharField(max_length=100)),
('zipcode', models.CharField(max_length=20)),
('description', models.TextField(blank=True)),
('price', models.IntegerField()),
('bedrooms', models.IntegerField()),
('bathrooms', models.DecimalField(decimal_places=1, max_digits=2)),
('garage', models.IntegerField(default=0)),
('sqft', models.IntegerField()),
('lot_size', models.DecimalField(decimal_places=1, max_digits=5)),
('photo_main', models.ImageField(upload_to='photos/%Y/%m/%d/')),
('photo_1', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_2', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_3', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_4', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_5', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('photo_6', models.ImageField(blank=True, upload_to='photos/%Y/%m/%d/')),
('is_published', models.BooleanField(default=True)),
('list_date', models.DateTimeField(blank=True, default=datetime.datetime.now)),
('realtor', models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='realtors.Realtor')),
],
),
]
|
[
"[email protected]"
] | |
4dd1a54ae37966be0ac487524f3fed672f577f6a
|
e0980f704a573894350e285f66f4cf390837238e
|
/.history/streams/blocks_20201023164513.py
|
720eace3df9f2dc075a573f46a06fa46c97fa7f1
|
[] |
no_license
|
rucpata/WagtailWebsite
|
28008474ec779d12ef43bceb61827168274a8b61
|
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
|
refs/heads/main
| 2023-02-09T15:30:02.133415 | 2021-01-05T14:55:45 | 2021-01-05T14:55:45 | 303,961,094 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,931 |
py
|
from wagtail.core import blocks
from wagtail.images.blocks import ImageChooserBlock
class TitleBlock(blocks.StructBlock):
text = blocks.CharBlock(
required = True,
        help_text='Tekst do wyświetlenia',
)
class Meta:
template = 'streams/title_block.html'
icon = 'edycja'
label = 'Tytuł'
help_text = 'Wyśrodkowany tekst do wyświetlenia na stronie.'
class LinkValue(blocks.StructValue):
"""Dodatkowao logika dla lików"""
def url(self) -> str:
internal_page = self.get('internal_page')
external_link = self.get('external_link')
if internal_page:
return internal_page.url
elif external_link:
return external_link
return ''
class Link(blocks.StructBlock):
link_text = blocks.CharBlock(
max_length=50,
default='Więcej szczegółów'
)
    internal_page = blocks.PageChooserBlock(
required=False
)
external_link = blocks.URLBlock(
required=False
)
class Meta:
value_class = LinkValue
class Card(blocks.StructBlock):
title = blocks.CharBlock(
max_length=100,
help_text = 'Pogrubiony tytuł tej karty. Maksymalnie 100 znaków.'
)
text = blocks.TextBlock(
max_length=255,
help_text='Opcjonalny tekst tej karty. Maksymalnie 255 znaków.'
)
image = ImageChooserBlock(
help_text = 'Obraz zostanie automatycznie przycięty o 570 na 370 pikseli'
)
    link = Link(help_text='Wybierz link')
class CardsBlock(blocks.StructBlock):
cards = blocks.ListBlock(
Card()
)
class Meta:
template = 'streams/card_block.html'
icon = 'image'
label = 'Karty standardowe'
class ImageAndTextBlock(blocks.StructBlock):
    image = ImageChooserBlock(help_text='Obraz automatycznie ')
    # The fields below were left unfinished in this .history snapshot; their definitions
    # are an illustrative completion, not the author's final code.
    image_alignment = blocks.ChoiceBlock(
        choices=[('left', 'Left'), ('right', 'Right')], default='left', required=False
    )
    title = blocks.CharBlock(max_length=100, required=False)
    text = blocks.CharBlock(max_length=255, required=False)
    link = Link()
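# Added note (illustrative, not part of the original file): these blocks are typically
# exposed to pages through a StreamField on a wagtail Page model, e.g.
# body = StreamField([
#     ('title', TitleBlock()),
#     ('cards', CardsBlock()),
#     ('image_and_text', ImageAndTextBlock()),
# ], blank=True)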
|
[
"[email protected]"
] | |
9d1e4dd14768158747ff119fc01cb8c311ebdd4d
|
78144baee82268a550400bbdb8c68de524adc68f
|
/Production/python/Summer16v3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8_cff.py
|
9c1d46ac3678e0c9fb9f265b964da728f5774dfe
|
[] |
no_license
|
tklijnsma/TreeMaker
|
e6989c03189b849aff2007bad22e2bfc6922a244
|
248f2c04cc690ef2e2202b452d6f52837c4c08e5
|
refs/heads/Run2_2017
| 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null |
UTF-8
|
Python
| false | false | 10,792 |
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/110000/280DF6F8-7AD0-E811-884A-0025905AC97A.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/110000/3A8373E0-30D3-E811-BB36-5CB901C2A510.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/110000/4A870E50-46D1-E811-8F9A-B05ADA036C70.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/110000/A414A398-7FD3-E811-8D9F-D0BF9C039BA0.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/120000/4ABB6C52-8CD3-E811-A423-3464A9B96F00.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/120000/5A9D98D2-E5D1-E811-AA01-D0BF9C033BB0.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/120000/5C7E66B1-70D0-E811-A909-0025905AC960.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/120000/6A02244F-46D1-E811-BBDA-308D99304920.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/120000/945BD266-71D0-E811-9334-0025905AC822.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/120000/9E43F36D-19D2-E811-963C-D0BF9C033BB0.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/120000/A65097C8-70D0-E811-88A0-0025905AC878.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/270000/20058975-71D0-E811-90A1-0025905AC822.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/270000/46642371-8DD0-E811-A385-5CB901C39220.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/04FAC2F3-85D0-E811-A1D3-0025905AC804.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/12074C68-77D0-E811-BB2C-308D99304920.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/16417F20-E8D1-E811-8DD5-70106F421A38.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/188DBE42-7BD0-E811-BC13-0025905AC95E.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/1A1CFF47-66D2-E811-B1A8-5CB901C39220.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/2491B0B8-EDD1-E811-A921-0025905AC878.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/265F7031-17D2-E811-A3CD-B05ADA036C70.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/2E1F2B37-EED1-E811-AEB8-0025905AC97A.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/3057A8F6-2BD7-E811-8A21-0025905AC960.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/36777B9F-16D2-E811-85E2-308D99304920.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/38089DBD-77D0-E811-BD2A-3464A9B96F00.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/38AD82A6-77D0-E811-87BA-0025905AC97A.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/38B410CF-02D3-E811-BBBE-0025905AC97A.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/3E3C6740-78D0-E811-9589-70106F421A38.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/4247B8DC-76D0-E811-8446-D0BF9C033BB0.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/465CD2A8-77D0-E811-9CB2-0025905AC984.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/4849DDDD-28D2-E811-873F-5CB901C34160.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/4C1E7B4B-52D3-E811-AB67-5CB901C39220.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/5CD3B066-5CD1-E811-BD7D-3464A9B95F50.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/6231669A-8DD0-E811-ACAC-0025905C4432.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/6CB63142-EED1-E811-88FB-3464A9B95F50.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/6EEC0D29-EED1-E811-B5B9-5CB901C34160.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/7262D733-18D2-E811-A1B7-5CB901C39220.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/84746DF9-EDD1-E811-AD9E-0025905C22B0.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/8A40163D-EED1-E811-9FD0-0025905AC97A.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/8A4D8549-7ED2-E811-BD85-5CB901C2A510.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/8CDC2043-78D0-E811-B968-70106F421A38.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/94CC1A77-E7D1-E811-B258-70106F421A38.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/963B5C2A-56D5-E811-BC02-0025905C22B0.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/9AF7C401-22D2-E811-B004-70106F421A38.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/A2F7AA92-8BD0-E811-A224-5CB901C39220.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/A400A9FD-EDD1-E811-ABBA-0025905AC878.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/B2374305-EED1-E811-957F-0025905AF57E.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/B2ED5B9B-EDD1-E811-9E41-3464A9B95F70.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/B80039B5-84D0-E811-A240-0025905AC984.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/BE9FBDEA-64D2-E811-ACC8-70106F42AFF8.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/C4733248-18D2-E811-BEE4-5CB901C39220.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/C668A9E1-EDD1-E811-85B1-3464A9B95F70.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/CA2BA0C8-77D0-E811-BA4F-3464A9B96F00.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/D2FBC84E-80D2-E811-B5ED-3464A9B96F00.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/E829D498-75D0-E811-8BFB-0025905C42D2.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/E87999BD-77D0-E811-9AE7-3464A9B96F00.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/FAD546B8-ECD1-E811-9C16-3464A9B95F70.root',
'/store/mc/RunIISummer16MiniAODv3/QCD_Pt_80to120_TuneCUETP8M1_13TeV_pythia8/MINIAODSIM/PUMoriond17_94X_mcRun2_asymptotic_v3-v1/60000/FE444634-17D2-E811-82EF-B05ADA036C70.root',
] )
|
[
"[email protected]"
] | |
01a3a428407e02aaf5a3e666649d68eaa1e1e1b1
|
8a4f0d4aad4a901bd08fd5eb92b2a31fb1bac167
|
/dizoo/smac/config/smac_MMM_coma_config.py
|
b99dd285cf57a67513b658576e9a62336a1d8eaa
|
[
"Apache-2.0"
] |
permissive
|
lichuminglcm/DI-engine
|
3977eed854dc634f8796764e0a7e0b71b615747f
|
e9052f195d231a9875afb053ba815c6341857571
|
refs/heads/main
| 2023-08-21T05:09:49.931351 | 2021-10-11T12:32:36 | 2021-10-11T12:32:36 | 415,903,070 | 0 | 0 |
Apache-2.0
| 2021-10-11T12:32:37 | 2021-10-11T11:48:26 |
Python
|
UTF-8
|
Python
| false | false | 2,855 |
py
|
import sys
from copy import deepcopy
from ding.entry import serial_pipeline
from easydict import EasyDict
agent_num = 10
collector_env_num = 16
evaluator_env_num = 8
main_config = dict(
env=dict(
map_name='MMM',
difficulty=7,
reward_only_positive=True,
mirror_opponent=False,
agent_num=agent_num,
collector_env_num=collector_env_num,
evaluator_env_num=evaluator_env_num,
shared_memory=False,
stop_value=0.999,
n_evaluator_episode=32,
),
policy=dict(
model=dict(
# (int) agent_num: The number of the agent.
# For SMAC 3s5z, agent_num=8; for 2c_vs_64zg, agent_num=2.
agent_num=agent_num,
            # (int) obs_shape: The dimension of each agent's observation.
            # For 3s5z, obs_shape=150; for 2c_vs_64zg, obs_shape=404.
            # (int) global_obs_shape: The dimension of the global observation.
            # For 3s5z, global_obs_shape=216; for 2c_vs_64zg, global_obs_shape=342.
obs_shape=dict(
agent_state=186,
global_state=290,
),
# (int) action_shape: The number of action which each agent can take.
# action_shape= the number of common action (6) + the number of enemies.
            # For 3s5z, action_shape=14 (6+8); for 2c_vs_64zg, action_shape=70 (6+64).
action_shape=16,
# (List[int]) The size of hidden layer
actor_hidden_size_list=[64],
),
# used in state_num of hidden_state
collect=dict(
n_episode=32,
unroll_len=10,
env_num=collector_env_num,
),
eval=dict(env_num=evaluator_env_num, evaluator=dict(eval_freq=100, )),
other=dict(
eps=dict(
type='exp',
start=0.5,
end=0.01,
decay=200000,
),
replay_buffer=dict(
# (int) max size of replay buffer
replay_buffer_size=5000,
# (int) max use count of data, if count is bigger than this value, the data will be removed from buffer
max_use=10,
),
),
),
)
main_config = EasyDict(main_config)
create_config = dict(
env=dict(
type='smac',
import_names=['dizoo.smac.envs.smac_env'],
),
env_manager=dict(type='subprocess'),
policy=dict(type='coma'),
collector=dict(type='episode', get_train_sample=True),
)
create_config = EasyDict(create_config)
def train(args):
config = [main_config, create_config]
serial_pipeline(config, seed=args.seed)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--seed', '-s', type=int, default=0)
args = parser.parse_args()
train(args)
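    # Added comment: run as a script, e.g. `python smac_MMM_coma_config.py --seed 0`;
    # train() hands [main_config, create_config] to serial_pipeline with the chosen seed.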
|
[
"[email protected]"
] | |
00f80ba31ddfb7053bb5c584ada8ce11612618d3
|
1baf76e19a719ebb2207f2af2924fc53349d6a60
|
/internship3_env/bin/black
|
7ae627598807eb9072010262c912bdf4e16afbd3
|
[
"MIT"
] |
permissive
|
Zamy97/internship_3
|
4deb0df914e68930b23faa6bf7e0ca7fd342fbd8
|
9c9db252b6818316e9864839075bb1d23714f7e4
|
refs/heads/master
| 2023-01-01T15:33:45.980776 | 2020-10-28T02:47:34 | 2020-10-28T02:47:34 | 307,861,296 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 338 |
#!/Users/zamy/Desktop/Python_Projects/excl_intrnship_projects/excl_internship_0/internship_3/internship_3/internship3_env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from black import patched_main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(patched_main())
|
[
"[email protected]"
] | ||
e92f37657c6db9b8535a58da0709431147348625
|
6413fe58b04ac2a7efe1e56050ad42d0e688adc6
|
/tempenv/lib/python3.7/site-packages/plotly/validators/parcats/line/colorbar/_lenmode.py
|
bcb4aa6f6f7b818eda6ecb5c198c420b34567bf3
|
[
"MIT"
] |
permissive
|
tytechortz/Denver_temperature
|
7f91e0ac649f9584147d59193568f6ec7efe3a77
|
9d9ea31cd7ec003e8431dcbb10a3320be272996d
|
refs/heads/master
| 2022-12-09T06:22:14.963463 | 2019-10-09T16:30:52 | 2019-10-09T16:30:52 | 170,581,559 | 1 | 0 |
MIT
| 2022-06-21T23:04:21 | 2019-02-13T21:22:53 |
Python
|
UTF-8
|
Python
| false | false | 568 |
py
|
import _plotly_utils.basevalidators
class LenmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name='lenmode',
parent_name='parcats.line.colorbar',
**kwargs
):
super(LenmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'colorbars'),
role=kwargs.pop('role', 'info'),
values=kwargs.pop('values', ['fraction', 'pixels']),
**kwargs
)
|
[
"[email protected]"
] | |
b124bf0ba59ebf9333a3780b526609d07c55a3e6
|
ac1938e7513d8e58f2228962b10caa1044a3d8ff
|
/python-fundamentals/39-find_the_duplicate/solution.py
|
eb8b89c18dca0a01d877565a4f8a2dce20c66afe
|
[] |
no_license
|
annikaslund/python_practice
|
fb211cfec725573a3e9f5f358c869e1edd8608a3
|
a6a1586ebbb1883afc8d7920848167955fa258a0
|
refs/heads/master
| 2020-04-22T07:33:32.389883 | 2019-02-14T16:53:53 | 2019-02-14T16:53:53 | 170,220,379 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 181 |
py
|
def find_the_duplicate(nums):
"""Find duplicate number in nums."""
seen = set()
for num in nums:
if num in seen:
return num
seen.add(num)
|
[
"[email protected]"
] | |
659c8d2a968faea0223a4e997f381d01e84cd5bb
|
49edd8549054f63a73c846d0bdf48930703b9aed
|
/app/core/tests/test_commands.py
|
62b7167bf5c90ce7dbf86dd6f42b6fa7e37f8d6e
|
[] |
no_license
|
AlekseiChirkov/recipe-app-api
|
370ccc8239197d700407449e892abd0a804e1504
|
e5d1a0561951b46e0766c96e28f5f4ad707a9bc9
|
refs/heads/main
| 2023-08-23T10:14:50.337020 | 2021-10-04T07:02:19 | 2021-10-04T07:02:19 | 381,278,748 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 890 |
py
|
from unittest.mock import patch
from django.core.management import call_command
from django.db.utils import OperationalError
from django.test import TestCase
class CommandTests(TestCase):
def test_wait_for_db_ready(self):
"""
        Test waiting for db when db is available
:return:
"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.return_value = True
call_command('wait_for_db')
self.assertEqual(gi.call_count, 1)
@patch('time.sleep', return_value=True)
def test_wait_for_db(self, ts):
"""
Test waiting for db
:return:
"""
with patch('django.db.utils.ConnectionHandler.__getitem__') as gi:
gi.side_effect = [OperationalError] * 5 + [True]
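            # Simulate five failed connection attempts followed by a success,
            # so the command should query the connection six times in total.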
call_command('wait_for_db')
self.assertEqual(gi.call_count, 6)
|
[
"[email protected]"
] | |
54431f70529ad477776902aaf6afb9bed0e1f2d0
|
7ae20e08e736e6df546cb5a80df2baf067686b52
|
/tasks/sprint-3/Финал B - Effective Quick Sort/effective_quick_sort_test.py
|
4938fba3eee95666adf5ae8f300c6c7e8bbbcacb
|
[] |
no_license
|
Grey2k/yandex.praktikum-alghoritms
|
faf466374c932733cc1c5049a2df719d8fd33ac7
|
97b1b4858265b44266a33b834e1e9a1349739048
|
refs/heads/master
| 2023-08-28T02:46:16.502298 | 2021-09-28T19:08:35 | 2021-09-28T19:08:35 | 334,646,281 | 10 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,881 |
py
|
import unittest
import io
from unittest.mock import patch
from effective_quick_sort import main
class EffectiveQuickSortTest(unittest.TestCase):
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 4 100',
'gena 6 1000',
'gosha 2 90',
'rita 2 90',
'timofey 4 80',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_one(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'gena',
'timofey',
'alla',
'gosha',
'rita',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 0 0',
'gena 0 0',
'rita 0 0',
'timofey 0 0',
'gosha 0 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_two(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena',
'gosha',
'rita',
'timofey',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 0',
'gena 0 0',
'gosha 1 100',
'rita 0 0',
'timofey 0 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_three(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gosha',
'gena',
'rita',
'timofey',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 0',
'gena 0 0',
'gosha 1 100',
'rita 2 0',
'timofey 2 100',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_four(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'rita',
'timofey',
'alla',
'gosha',
'gena',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'1',
'alla 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_five(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 1 0',
'gena 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_six(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'gena 1 0',
'alla 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_seven(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'gena 2 10',
'alla 2 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_eight(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'gena 1 10',
'alla 2 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_nine(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 10',
'gena 1 10',
'gosha 1 10',
'rita 2 100',
'timofey 2 100',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_ten(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'rita',
'timofey',
'alla',
'gena',
'gosha',
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 1 0',
'gena 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_eleven(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 2 0',
'gena 2 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_twelve(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'2',
'alla 2 0',
'gena 1 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_thirteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'3',
'alla 1 0',
'gosha 1 0',
'gena 1 0',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_fourteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena',
'gosha'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'3',
'alla 2 0',
'gosha 2 0',
'gena 2 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_fifteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gosha',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'3',
'alla 2 0',
'gosha 1 10',
'gena 1 10',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_sixteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'alla',
'gena',
'gosha'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'5',
'alla 1 100',
'gena 1 1000',
'gosha 1 90',
'rita 1 90',
'timofey 10 80',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_seventeen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'timofey',
'gosha',
'rita',
'alla',
'gena'
]) + '\n')
@patch('sys.stdin', io.StringIO("\n".join([
'13',
'tufhdbi 76 58',
'rqyoazgbmv 59 78',
'qvgtrlkmyrm 35 27',
'tgcytmfpj 70 27',
'xvf 84 19',
'jzpnpgpcqbsmczrgvsu 30 3',
'evjphqnevjqakze 92 15',
'wwzwv 87 8',
'tfpiqpwmkkduhcupp 1 82',
'tzamkyqadmybky 5 81',
'amotrxgba 0 6',
'easfsifbzkfezn 100 28',
'kivdiy 70 47',
])))
@patch('sys.stdout', new_callable=io.StringIO)
def test_input_eighteen(self, stdout):
main()
self.assertEqual(stdout.getvalue(), "\n".join([
'easfsifbzkfezn',
'evjphqnevjqakze',
'wwzwv',
'xvf',
'tufhdbi',
'tgcytmfpj',
'kivdiy',
'rqyoazgbmv',
'qvgtrlkmyrm',
'jzpnpgpcqbsmczrgvsu',
'tzamkyqadmybky',
'tfpiqpwmkkduhcupp',
'amotrxgba',
]) + '\n')
def test_comparator(self):
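        # Participants are compared as (-solved, penalty, name) tuples, so more
        # solved tasks rank first, then lower penalty, then lexicographic name.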
self.assertEqual((-1, 0, 'alla') > (-2, 0, 'gosha'), True)
self.assertEqual((-1, 0, 'alla') < (-1, 0, 'gosha'), True)
self.assertEqual((-1, 10, 'alla') > (-1, 0, 'gosha'), True)
|
[
"[email protected]"
] | |
3f5d882eb5c278177e7ea02ad9b0bae7cf3d56a7
|
dd80a584130ef1a0333429ba76c1cee0eb40df73
|
/external/chromium_org/chrome/common/extensions/docs/server2/features_bundle_test.py
|
50767975bc1ae25f4024e0185a990181d1544b71
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
karunmatharu/Android-4.4-Pay-by-Data
|
466f4e169ede13c5835424c78e8c30ce58f885c1
|
fcb778e92d4aad525ef7a995660580f948d40bc9
|
refs/heads/master
| 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 |
MIT
| 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null |
UTF-8
|
Python
| false | false | 7,104 |
py
|
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from extensions_paths import EXTENSIONS
from server_instance import ServerInstance
from test_file_system import TestFileSystem
_TEST_FILESYSTEM = {
'api': {
'_api_features.json': json.dumps({
'audioCapture': {
'channel': 'stable',
'extension_types': ['platform_app']
},
'background': [
{
'channel': 'stable',
'extension_types': ['extension']
},
{
'channel': 'stable',
'extension_types': ['platform_app'],
'whitelist': ['im not here']
}
],
'omnibox': {
'dependencies': ['manifest:omnibox'],
'contexts': ['blessed_extension']
},
'syncFileSystem': {
'dependencies': ['permission:syncFileSystem'],
'contexts': ['blessed_extension']
},
'tabs': {
'channel': 'stable',
'extension_types': ['extension', 'legacy_packaged_app'],
'contexts': ['blessed_extension']
},
'test': {
'channel': 'stable',
'extension_types': 'all',
'contexts': [
'blessed_extension', 'unblessed_extension', 'content_script']
},
'windows': {
'dependencies': ['api:tabs'],
'contexts': ['blessed_extension']
}
}),
'_manifest_features.json': json.dumps({
'app.content_security_policy': {
'channel': 'stable',
'extension_types': ['platform_app'],
'min_manifest_version': 2,
'whitelist': ['this isnt happening']
},
'background': {
'channel': 'stable',
'extension_types': ['extension', 'legacy_packaged_app', 'hosted_app']
},
'manifest_version': {
'channel': 'stable',
'extension_types': 'all'
},
'omnibox': {
'channel': 'stable',
'extension_types': ['extension']
},
'page_action': {
'channel': 'stable',
'extension_types': ['extension']
},
'sockets': {
'channel': 'dev',
'extension_types': ['platform_app']
}
}),
'_permission_features.json': json.dumps({
'bluetooth': {
'channel': 'dev',
'extension_types': ['platform_app']
},
'power': {
'channel': 'stable',
'extension_types': [
'extension', 'legacy_packaged_app', 'platform_app'
]
},
'syncFileSystem': {
'channel': 'stable',
'extension_types': ['platform_app']
},
'tabs': {
'channel': 'stable',
'extension_types': ['extension']
}
})
},
'docs': {
'templates': {
'json': {
'manifest.json': json.dumps({
'background': {
'documentation': 'background_pages.html'
},
'manifest_version': {
'documentation': 'manifest/manifest_version.html',
'example': 2,
'level': 'required'
},
'page_action': {
'documentation': 'pageAction.html',
'example': {},
'level': 'only_one'
}
}),
'permissions.json': json.dumps({
'fakeUnsupportedFeature': {},
'syncFileSystem': {
'partial': 'permissions/sync_file_system.html'
},
'tabs': {
'partial': 'permissions/tabs.html'
},
})
}
}
}
}
class FeaturesBundleTest(unittest.TestCase):
def setUp(self):
self._server = ServerInstance.ForTest(
TestFileSystem(_TEST_FILESYSTEM, relative_to=EXTENSIONS))
def testManifestFeatures(self):
expected_features = {
'background': {
'name': 'background',
'channel': 'stable',
'platforms': ['extensions'],
'documentation': 'background_pages.html'
},
'manifest_version': {
'name': 'manifest_version',
'channel': 'stable',
'platforms': ['apps', 'extensions'],
'documentation': 'manifest/manifest_version.html',
'level': 'required',
'example': 2
},
'omnibox': {
'name': 'omnibox',
'channel': 'stable',
'platforms': ['extensions']
},
'page_action': {
'name': 'page_action',
'channel': 'stable',
'platforms': ['extensions'],
'documentation': 'pageAction.html',
'level': 'only_one',
'example': {}
},
'sockets': {
'name': 'sockets',
'channel': 'dev',
'platforms': ['apps']
}
}
self.assertEqual(
expected_features,
self._server.features_bundle.GetManifestFeatures().Get())
def testPermissionFeatures(self):
expected_features = {
'bluetooth': {
'name': 'bluetooth',
'channel': 'dev',
'platforms': ['apps'],
},
'fakeUnsupportedFeature': {
'name': 'fakeUnsupportedFeature',
'platforms': []
},
'power': {
'name': 'power',
'channel': 'stable',
'platforms': ['apps', 'extensions'],
},
'syncFileSystem': {
'name': 'syncFileSystem',
'channel': 'stable',
'platforms': ['apps'],
'partial': 'permissions/sync_file_system.html'
},
'tabs': {
'name': 'tabs',
'channel': 'stable',
'platforms': ['extensions'],
'partial': 'permissions/tabs.html'
}
}
self.assertEqual(
expected_features,
self._server.features_bundle.GetPermissionFeatures().Get())
def testAPIFeatures(self):
expected_features = {
'audioCapture': {
'name': 'audioCapture',
'channel': 'stable',
'platforms': ['apps']
},
'background': {
'name': 'background',
'channel': 'stable',
'platforms': ['extensions']
},
'omnibox': {
'name': 'omnibox',
'platforms': ['extensions'],
'contexts': ['blessed_extension'],
'dependencies': ['manifest:omnibox']
},
'syncFileSystem': {
'name': 'syncFileSystem',
'platforms': ['apps'],
'contexts': ['blessed_extension'],
'dependencies': ['permission:syncFileSystem']
},
'tabs': {
'name': 'tabs',
'channel': 'stable',
'platforms': ['extensions'],
'contexts': ['blessed_extension'],
},
'test': {
'name': 'test',
'channel': 'stable',
'platforms': ['apps', 'extensions'],
'contexts': [
'blessed_extension', 'unblessed_extension', 'content_script'],
},
'windows': {
'name': 'windows',
'platforms': ['extensions'],
'contexts': ['blessed_extension'],
'dependencies': ['api:tabs']
}
}
self.assertEqual(
expected_features,
self._server.features_bundle.GetAPIFeatures().Get())
if __name__ == '__main__':
unittest.main()
|
[
"[email protected]"
] | |
279e0d1e2d470454199c547c791cb5ef62e33742
|
18305efd1edeb68db69880e03411df37fc83b58b
|
/pdb_files3000rot/yw/1ywr/tractability_450/pymol_results_file.py
|
ebb72d9d9960837c67a5b78068b76528df0d8d86
|
[] |
no_license
|
Cradoux/hotspot_pipline
|
22e604974c8e38c9ffa979092267a77c6e1dc458
|
88f7fab8611ebf67334474c6e9ea8fc5e52d27da
|
refs/heads/master
| 2021-11-03T16:21:12.837229 | 2019-03-28T08:31:39 | 2019-03-28T08:31:39 | 170,106,739 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,618 |
py
|
from os.path import join
import tempfile
import zipfile
from pymol import cmd, finish_launching
from pymol.cgo import *
finish_launching()
dirpath = None
def cgo_arrow(atom1='pk1', atom2='pk2', radius=0.07, gap=0.0, hlength=-1, hradius=-1, color='blue red', name=''):
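    # Build a CGO arrow (cylinder shaft plus cone head) from atom1 to atom2.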
    from pymol import cgo
    from chempy import cpv
radius, gap = float(radius), float(gap)
hlength, hradius = float(hlength), float(hradius)
try:
color1, color2 = color.split()
except:
color1 = color2 = color
color1 = list(cmd.get_color_tuple(color1))
color2 = list(cmd.get_color_tuple(color2))
def get_coord(v):
if not isinstance(v, str):
return v
if v.startswith('['):
return cmd.safe_list_eval(v)
return cmd.get_atom_coords(v)
xyz1 = get_coord(atom1)
xyz2 = get_coord(atom2)
normal = cpv.normalize(cpv.sub(xyz1, xyz2))
if hlength < 0:
hlength = radius * 3.0
if hradius < 0:
hradius = hlength * 0.6
if gap:
diff = cpv.scale(normal, gap)
xyz1 = cpv.sub(xyz1, diff)
xyz2 = cpv.add(xyz2, diff)
xyz3 = cpv.add(cpv.scale(normal, hlength), xyz2)
obj = [cgo.CYLINDER] + xyz1 + xyz3 + [radius] + color1 + color2 + [cgo.CONE] + xyz3 + xyz2 + [hradius, 0.0] + color2 + color2 + [1.0, 0.0]
return obj
dirpath = tempfile.mkdtemp()
zip_dir = 'out.zip'
with zipfile.ZipFile(zip_dir) as hs_zip:
hs_zip.extractall(dirpath)
cmd.load(join(dirpath,"protein.pdb"), "protein")
cmd.show("cartoon", "protein")
if dirpath:
f = join(dirpath, "label_threshold_10.mol2")
else:
f = "label_threshold_10.mol2"
cmd.load(f, 'label_threshold_10')
cmd.hide('everything', 'label_threshold_10')
cmd.label("label_threshold_10", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_14.mol2")
else:
f = "label_threshold_14.mol2"
cmd.load(f, 'label_threshold_14')
cmd.hide('everything', 'label_threshold_14')
cmd.label("label_threshold_14", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
if dirpath:
f = join(dirpath, "label_threshold_17.mol2")
else:
f = "label_threshold_17.mol2"
cmd.load(f, 'label_threshold_17')
cmd.hide('everything', 'label_threshold_17')
cmd.label("label_threshold_17", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
colour_dict = {'acceptor':'red', 'donor':'blue', 'apolar':'yellow', 'negative':'purple', 'positive':'cyan'}
threshold_list = [10, 14, 17]
gfiles = ['donor.grd', 'apolar.grd', 'acceptor.grd']
grids = ['donor', 'apolar', 'acceptor']
num = 0
surf_transparency = 0.2
if dirpath:
gfiles = [join(dirpath, g) for g in gfiles]
for t in threshold_list:
for i in range(len(grids)):
try:
cmd.load(r'%s'%(gfiles[i]), '%s_%s'%(grids[i], str(num)))
cmd.isosurface('surface_%s_%s_%s'%(grids[i], t, num), '%s_%s'%(grids[i], num), t)
cmd.set('transparency', surf_transparency, 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.color(colour_dict['%s'%(grids[i])], 'surface_%s_%s_%s'%(grids[i], t, num))
cmd.group('threshold_%s'%(t), members = 'surface_%s_%s_%s'%(grids[i],t, num))
cmd.group('threshold_%s' % (t), members='label_threshold_%s' % (t))
except:
continue
try:
cmd.group('hotspot_%s' % (num), members='threshold_%s' % (t))
except:
continue
for g in grids:
cmd.group('hotspot_%s' % (num), members='%s_%s' % (g,num))
cluster_dict = {"16.1989994049":[], "16.1989994049_arrows":[]}
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(-1.0), float(-3.0), float(23.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([-1.0,-3.0,23.5], [-4.133,-2.917,23.047], color="blue red", name="Arrows_16.1989994049_1")
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(2.5), float(-3.0), float(15.0), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.5,-3.0,15.0], [3.509,-5.374,14.235], color="blue red", name="Arrows_16.1989994049_2")
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(2.0), float(3.5), float(19.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.0,3.5,19.5], [4.113,5.844,19.273], color="blue red", name="Arrows_16.1989994049_3")
cluster_dict["16.1989994049"] += [COLOR, 0.00, 0.00, 1.00] + [ALPHA, 0.6] + [SPHERE, float(4.0), float(3.5), float(26.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([4.0,3.5,26.5], [2.411,5.22,24.84], color="blue red", name="Arrows_16.1989994049_4")
cluster_dict["16.1989994049"] += [COLOR, 1.00, 1.000, 0.000] + [ALPHA, 0.6] + [SPHERE, float(2.85864055667), float(-0.56159484147), float(21.1266280078), float(1.0)]
cluster_dict["16.1989994049"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(2.5), float(-3.0), float(16.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.5,-3.0,16.5], [3.933,-5.403,17.063], color="red blue", name="Arrows_16.1989994049_5")
cluster_dict["16.1989994049"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(2.5), float(-3.0), float(16.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([2.5,-3.0,16.5], [3.933,-5.403,17.063], color="red blue", name="Arrows_16.1989994049_6")
cluster_dict["16.1989994049"] += [COLOR, 1.00, 0.00, 0.00] + [ALPHA, 0.6] + [SPHERE, float(4.0), float(3.0), float(22.5), float(1.0)]
cluster_dict["16.1989994049_arrows"] += cgo_arrow([4.0,3.0,22.5], [2.892,6.029,22.446], color="red blue", name="Arrows_16.1989994049_7")
cmd.load_cgo(cluster_dict["16.1989994049"], "Features_16.1989994049", 1)
cmd.load_cgo(cluster_dict["16.1989994049_arrows"], "Arrows_16.1989994049")
cmd.set("transparency", 0.2,"Features_16.1989994049")
cmd.group("Pharmacophore_16.1989994049", members="Features_16.1989994049")
cmd.group("Pharmacophore_16.1989994049", members="Arrows_16.1989994049")
if dirpath:
f = join(dirpath, "label_threshold_16.1989994049.mol2")
else:
f = "label_threshold_16.1989994049.mol2"
cmd.load(f, 'label_threshold_16.1989994049')
cmd.hide('everything', 'label_threshold_16.1989994049')
cmd.label("label_threshold_16.1989994049", "name")
cmd.set("label_font_id", 7)
cmd.set("label_size", -0.4)
cmd.group('Pharmacophore_16.1989994049', members= 'label_threshold_16.1989994049')
cmd.bg_color("white")
cmd.show("cartoon", "protein")
cmd.color("slate", "protein")
cmd.show("sticks", "organic")
cmd.hide("lines", "protein")
|
[
"[email protected]"
] | |
882f0f25e1b5052422d03c13ad8d30b00414aa07
|
4ba50e4df5b7d8d8330135559554b990ce67bce4
|
/tests/__init__.py
|
67087766e181371172841cfcf3dd1f3c7da689ea
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
scottyhq/snowexsql
|
dd0c6741c7d0e0f679606f0d94368df667f4d533
|
c673a746592d2a6dc02547ac11a68a697dfe0d34
|
refs/heads/master
| 2023-04-15T02:44:28.769853 | 2021-05-01T14:49:56 | 2021-05-01T14:49:56 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 39 |
py
|
"""Unit test package for snowexsql."""
|
[
"[email protected]"
] | |
e829b3c4b85543d0c217195b930a908a47eb42ec
|
386d1b6557f4cbaf20794cd222f3b7b8598ef6a6
|
/data/clean_data/A1/81.py
|
8adc1c38d7e03dff806e025dd070c4524f27725c
|
[] |
no_license
|
woowei0102/code2pro
|
3baf86985f911264362963c503f12d20bdc1f89f
|
0b16c62a1cb9053ab59edd7a52e1b3b39fdf66dc
|
refs/heads/main
| 2023-06-28T23:09:23.998798 | 2021-07-13T11:49:27 | 2021-07-13T11:49:27 | 385,585,282 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 621 |
py
|
class Account:
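    # Minimal bank account: deposits, overdraft-guarded withdrawals, and a balance report.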
def __init__(self, name):
self.name = name
self.balance = 0
    def deposit(self, amount):
        self.balance += amount
        print('{} deposited NT$'.format(self.name) + str(amount))
    def withdraw(self, amount):
        if self.balance >= amount:
            self.balance -= amount
        else:
            print('{} has insufficient funds.'.format(self.name))
    def show(self):
        print("{} balance: NT${:,.0f}".format(self.name, self.balance))
userA = Account("Jack")
userA.withdraw(1000)
userA.deposit(5000)
userA.withdraw(1000)
userA.show()
|
[
"[email protected]"
] | |
d1c982f88855f761cac1e63ac2a25c7026cee10c
|
9daf1ecdfc69a1a97998465fae2102f0f2845eb0
|
/deepbond/models/rcnn_crf.py
|
3118eefd7a1f3809a359955b8e9450ee478a342e
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
mtreviso/deepbond
|
f3d23de8955f8ff1085b24fe53ebb7ff722a2a7f
|
a36ccb71e4457889d340920260f18666835d703f
|
refs/heads/master
| 2023-04-07T22:58:34.650115 | 2023-03-15T16:27:58 | 2023-03-15T16:27:58 | 114,040,073 | 17 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,573 |
py
|
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack
from deepbond import constants
from deepbond.initialization import init_xavier, init_kaiming
from deepbond.models.model import Model
from deepbond.modules.crf import CRF
class RCNNCRF(Model):
"""Recurrent Convolutional Neural Network + CRF.
As described in: https://arxiv.org/pdf/1610.00211.pdf
"""
def __init__(self, words_field, tags_field, options):
super().__init__(words_field, tags_field)
#
# Embeddings
#
word_embeddings = None
if self.words_field.vocab.vectors is not None:
word_embeddings = self.words_field.vocab.vectors
options.word_embeddings_size = word_embeddings.size(1)
self.word_emb = nn.Embedding(
num_embeddings=len(self.words_field.vocab),
embedding_dim=options.word_embeddings_size,
padding_idx=constants.PAD_ID,
_weight=word_embeddings,
)
self.dropout_emb = nn.Dropout(options.emb_dropout)
if options.freeze_embeddings:
self.word_emb.weight.requires_grad = False
features_size = options.word_embeddings_size
#
# CNN 1D
#
self.cnn_1d = nn.Conv1d(in_channels=features_size,
out_channels=options.conv_size,
kernel_size=options.kernel_size,
padding=options.kernel_size // 2)
self.max_pool = nn.MaxPool1d(options.pool_length,
padding=options.pool_length // 2)
self.dropout_cnn = nn.Dropout(options.cnn_dropout)
self.relu = torch.nn.ReLU()
features_size = (options.conv_size // options.pool_length +
options.pool_length // 2)
#
# RNN
#
self.is_bidir = options.bidirectional
self.sum_bidir = options.sum_bidir
self.rnn_type = options.rnn_type
if self.rnn_type == 'gru':
rnn_class = nn.GRU
elif self.rnn_type == 'lstm':
rnn_class = nn.LSTM
else:
rnn_class = nn.RNN
hidden_size = options.hidden_size[0]
self.rnn = rnn_class(features_size,
hidden_size,
bidirectional=self.is_bidir,
batch_first=True)
self.dropout_rnn = nn.Dropout(options.rnn_dropout)
self.sigmoid = torch.nn.Sigmoid()
features_size = hidden_size
eos_tag_id = self.tags_field.vocab.stoi['.'] if '.' in self.tags_field.vocab.stoi else self.tags_field.vocab.stoi['_']
self.crf = CRF(
self.nb_classes,
bos_tag_id=self.tags_field.vocab.stoi['_'], # hack
eos_tag_id=eos_tag_id,
pad_tag_id=None,
batch_first=True,
)
#
# Linear
#
n = 1 if not self.is_bidir or self.sum_bidir else 2
self.linear_out = nn.Linear(n * features_size, self.nb_classes)
self.init_weights()
self.is_built = True
def init_weights(self):
if self.cnn_1d is not None:
init_kaiming(self.cnn_1d, dist='uniform', nonlinearity='relu')
if self.rnn is not None:
init_xavier(self.rnn, dist='uniform')
if self.linear_out is not None:
init_xavier(self.linear_out, dist='uniform')
def build_loss(self, loss_weights=None):
self._loss = self.crf
def loss(self, emissions, gold):
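        # CRF negative log-likelihood; padded positions are masked out and their
        # gold ids are reset to 0 so they still index a valid tag.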
mask = gold != constants.TAGS_PAD_ID
crf_gold = gold.clone()
crf_gold[mask == 0] = 0
return self._loss(emissions, crf_gold, mask=mask.float())
def predict_classes(self, batch):
emissions = self.forward(batch)
mask = batch.words != constants.PAD_ID
_, path = self.crf.decode(emissions, mask=mask[:, 2:].float())
return [torch.tensor(p) for p in path]
def predict_proba(self, batch):
raise Exception('Predict() probability is not available.')
def forward(self, batch):
assert self.is_built
assert self._loss is not None
h = batch.words
mask = h != constants.PAD_ID
lengths = mask.int().sum(dim=-1)
# (bs, ts) -> (bs, ts, emb_dim)
h = self.word_emb(h)
h = self.dropout_emb(h)
# Turn (bs, ts, emb_dim) into (bs, emb_dim, ts) for CNN
h = h.transpose(1, 2)
# (bs, emb_dim, ts) -> (bs, conv_size, ts)
h = self.relu(self.cnn_1d(h))
# Turn (bs, conv_size, ts) into (bs, ts, conv_size) for Pooling
h = h.transpose(1, 2)
# (bs, ts, conv_size) -> (bs, ts, pool_size)
h = self.max_pool(h)
h = self.dropout_cnn(h)
# (bs, ts, pool_size) -> (bs, ts, hidden_size)
h = pack(h, lengths, batch_first=True, enforce_sorted=False)
h, _ = self.rnn(h)
h, _ = unpack(h, batch_first=True)
# if you'd like to sum instead of concatenate:
if self.sum_bidir:
h = (h[:, :, :self.rnn.hidden_size] +
h[:, :, self.rnn.hidden_size:])
h = self.sigmoid(h)
# apply dropout
h = self.dropout_rnn(h)
# (bs, ts, hidden_size) -> (bs, ts, nb_classes)
h = self.linear_out(h)
# remove <bos> and <eos> tokens
# (bs, ts, nb_classes) -> (bs, ts-2, nb_classes)
h = h[:, 1:-1, :]
return h
|
[
"[email protected]"
] | |
1cd352d1bca1e800029113e0addaac329f0597b1
|
4f7dc1bd5a5561c9f3fb693f0d6f4c6b13504db6
|
/library/v0.5/analysis_tools/kinase_enrichment/kinase_enrichment.py
|
00278ff36b510fa6436063457fa111a67be34ad7
|
[] |
no_license
|
bluejek128/microglia
|
296b02d21f82f0769c18e3fa7e63eadd374e4965
|
f9f2281c277d1b71ca80e26cc071fa096e653e68
|
refs/heads/master
| 2020-03-13T21:48:14.750943 | 2018-05-07T20:43:32 | 2018-05-07T20:43:32 | 131,304,292 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,204 |
py
|
#################################################################
#################################################################
############### DE
#################################################################
#################################################################
#############################################
########## 1. Load libraries
#############################################
##### 1. General support #####
import qgrid, requests, json
import pandas as pd
import numpy as np
from IPython.display import display, Markdown, HTML
##### 2. Other libraries #####
#######################################################
#######################################################
########## Support
#######################################################
#######################################################
#############################################
########## 1. Get Enrichr Results
#############################################
def get_enrichr_results(user_list_id, gene_set_libraries, overlappingGenes=True):
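    # Query the Enrichr API once per requested library and return the results
    # as a single concatenated dataframe.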
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/enrich'
query_string = '?userListId=%s&backgroundType=%s'
results = []
for gene_set_library in gene_set_libraries:
response = requests.get(
ENRICHR_URL + query_string % (user_list_id, gene_set_library)
)
if not response.ok:
raise Exception('Error fetching enrichment results')
data = json.loads(response.text)
resultDataframe = pd.DataFrame(data[gene_set_library], columns=['rank', 'term_name', 'pvalue', 'zscore', 'combined_score', 'overlapping_genes', 'FDR', 'old_pvalue', 'old_FDR'])
selectedColumns = ['term_name','zscore','combined_score','pvalue', 'FDR'] if not overlappingGenes else ['term_name','zscore','combined_score','FDR', 'pvalue', 'overlapping_genes']
resultDataframe = resultDataframe.loc[:,selectedColumns]
resultDataframe['gene_set_library'] = gene_set_library
results.append(resultDataframe)
concatenatedDataframe = pd.concat(results)
return concatenatedDataframe
#############################################
########## 2. Display Result Table
#############################################
def results_table(enrichment_dataframe, source_label, target_label):
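    # Render one scrollable HTML table per gene set library, linking each entry
    # to Harmonizome and bolding rows with FDR < 0.05.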
# Get libraries
for gene_set_library in enrichment_dataframe['gene_set_library'].unique():
# Get subset
enrichment_dataframe_subset = enrichment_dataframe[enrichment_dataframe['gene_set_library'] == gene_set_library].copy()
# Get unique values from source column
enrichment_dataframe_subset[source_label] = [x.split('_')[0] for x in enrichment_dataframe_subset['term_name']]
enrichment_dataframe_subset = enrichment_dataframe_subset.sort_values(['FDR', 'pvalue']).rename(columns={'pvalue': 'P-value'}).drop_duplicates(source_label)
# Add links and bold for significant results
enrichment_dataframe_subset[source_label] = ['<a href="http://amp.pharm.mssm.edu/Harmonizome/gene/{x}" target="_blank">{x}</a>'.format(**locals()) for x in enrichment_dataframe_subset[source_label]]
enrichment_dataframe_subset[source_label] = [rowData[source_label].replace('target="_blank">', 'target="_blank"><b>').replace('</a>', '*</b></a>') if rowData['FDR'] < 0.05 else rowData[source_label] for index, rowData in enrichment_dataframe_subset.iterrows()]
# Add rank
enrichment_dataframe_subset['Rank'] = ['<b>'+str(x+1)+'</b>' for x in range(len(enrichment_dataframe_subset.index))]
# Add overlapping genes with tooltip
enrichment_dataframe_subset['nr_overlapping_genes'] = [len(x) for x in enrichment_dataframe_subset['overlapping_genes']]
enrichment_dataframe_subset['overlapping_genes'] = [', '.join(x) for x in enrichment_dataframe_subset['overlapping_genes']]
enrichment_dataframe_subset[target_label.title()] = ['{nr_overlapping_genes} {geneset} '.format(**rowData)+target_label+'s' for index, rowData in enrichment_dataframe_subset.iterrows()]
# enrichment_dataframe[target_label.title()] = ['<span class="gene-tooltip">{nr_overlapping_genes} {geneset} '.format(**rowData)+target_label+'s<div class="gene-tooltip-text">{overlapping_genes}</div></span>'.format(**rowData) for index, rowData in enrichment_dataframe.iterrows()]
# Convert to HTML
pd.set_option('max.colwidth', -1)
html_table = enrichment_dataframe_subset.head(50)[['Rank', source_label, 'P-value', 'FDR', target_label.title()]].to_html(escape=False, index=False, classes='w-100')
html_results = '<div style="max-height: 200px; overflow-y: scroll;">{}</div>'.format(html_table)
# Add CSS
display(HTML('<style>.w-100{width: 100%;} .text-left th{text-align: left !important;}</style>'))
display(HTML('<style>.slick-cell{overflow: visible;}.gene-tooltip{text-decoration: underline; text-decoration-style: dotted;}.gene-tooltip .gene-tooltip-text{visibility: hidden; position: absolute; left: 60%; width: 250px; z-index: 1000; text-align: center; background-color: black; color: white; padding: 5px 10px; border-radius: 5px;} .gene-tooltip:hover .gene-tooltip-text{visibility: visible;} .gene-tooltip .gene-tooltip-text::after {content: " ";position: absolute;bottom: 100%;left: 50%;margin-left: -5px;border-width: 5px;border-style: solid;border-color: transparent transparent black transparent;}</style>'))
# Display gene set
display(Markdown('### A. KEA (experimentally validated substrates)' if gene_set_library == 'KEA_2015' else '### B. ARCHS4 (coexpressed genes)'))
# Display table
display(HTML(html_results))
#######################################################
#######################################################
########## S1. Function
#######################################################
#######################################################
#############################################
########## 1. Run
#############################################
def run(enrichr_results, signature_label):
# Initialize results
results = []
# Loop through genesets
for geneset in ['upregulated', 'downregulated']:
# Append ChEA results
enrichment_dataframe = get_enrichr_results(enrichr_results[geneset]['userListId'], gene_set_libraries=['KEA_2015', 'ARCHS4_Kinases_Coexp'])
enrichment_dataframe['geneset'] = geneset
results.append(enrichment_dataframe)
# Concatenate results
tf_dataframe = pd.concat(results)
return {'tf_dataframe': tf_dataframe, 'signature_label': signature_label}
#############################################
########## 2. Plot
#############################################
def plot(kinase_enrichment_results, plot_counter):
results_table(kinase_enrichment_results['tf_dataframe'].copy(), source_label='Kinase', target_label='substrate')
# Figure Legend
    display(Markdown('** Table '+plot_counter('table')+' | Kinase Enrichment Analysis Results. **The figure contains browsable tables displaying the results of the Protein Kinase (PK) enrichment analysis generated using Enrichr. Every row represents a PK; significant PKs are highlighted in bold. A displays results generated using KEA, indicating PKs whose experimentally validated substrates are enriched. B displays results generated using the ARCHS4 library, indicating PKs whose top coexpressed genes (according to the ARCHS4 dataset) are enriched.'.format(**locals())))
|
[
"[email protected]"
] | |
c1080f3be20f44fea54a411bbe22c1ee9d6f684d
|
fe4f2aeb889f939ea6caf4a34371a3558064abcd
|
/vqa/model_vlmap_answer_full.py
|
f1185b07099b9fce81c71ccfbf3435a883d84cff
|
[
"MIT"
] |
permissive
|
HyeonwooNoh/VQA-Transfer-ExternalData
|
cf9c1b82dd55389dfe5f52d8fd196780dd3d4629
|
d21b700bcdc3ba3c392ff793b3f5efe23eb68ed6
|
refs/heads/master
| 2021-10-25T22:58:00.318492 | 2019-04-08T04:52:46 | 2019-04-08T04:52:46 | 122,662,354 | 21 | 3 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,656 |
py
|
import cPickle
import h5py
import os
import numpy as np
import tensorflow as tf
from util import log
from vlmap import modules
W_DIM = 300 # Word dimension
L_DIM = 1024 # Language dimension
V_DIM = 1024
class Model(object):
def __init__(self, batch, config, is_train=True):
self.batch = batch
self.config = config
self.image_dir = config.image_dir
self.is_train = is_train
self.word_weight_dir = getattr(config, 'vlmap_word_weight_dir', None)
if self.word_weight_dir is None:
log.warn('word_weight_dir is None')
self.losses = {}
self.report = {}
self.mid_result = {}
self.vis_image = {}
self.global_step = tf.train.get_or_create_global_step(graph=None)
self.latent_loss_weight = tf.convert_to_tensor(0.1)
self.report['model_step'] = self.global_step
self.report['latent_loss_weight'] = self.latent_loss_weight
self.vocab = cPickle.load(open(config.vocab_path, 'rb'))
self.answer_dict = cPickle.load(open(
os.path.join(config.tf_record_dir, 'answer_dict.pkl'), 'rb'))
self.num_answer = len(self.answer_dict['vocab'])
self.num_train_answer = self.answer_dict['num_train_answer']
self.train_answer_mask = tf.expand_dims(tf.sequence_mask(
self.num_train_answer, maxlen=self.num_answer, dtype=tf.float32),
axis=0)
self.test_answer_mask = 1.0 - self.train_answer_mask
self.glove_map = modules.LearnGloVe(self.vocab)
self.answer_exist_mask = modules.AnswerExistMask(
self.answer_dict, self.word_weight_dir)
log.infov('loading image features...')
with h5py.File(config.vfeat_path, 'r') as f:
self.features = np.array(f.get('image_features'))
log.infov('feature done')
self.spatials = np.array(f.get('spatial_features'))
log.infov('spatials done')
self.normal_boxes = np.array(f.get('normal_boxes'))
log.infov('normal_boxes done')
self.num_boxes = np.array(f.get('num_boxes'))
log.infov('num_boxes done')
self.max_box_num = int(f['data_info']['max_box_num'].value)
self.vfeat_dim = int(f['data_info']['vfeat_dim'].value)
log.infov('done')
self.build()
def filter_train_vars(self, trainable_vars):
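        # Exclude the answer-prediction layers (q_linear_l, pooled_linear_l,
        # joint_fc, WordWeightAnswer) from the set of trainable variables.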
train_vars = []
for var in trainable_vars:
if var.name.split('/')[0] == 'q_linear_l': pass
elif var.name.split('/')[0] == 'pooled_linear_l': pass
elif var.name.split('/')[0] == 'joint_fc': pass
elif var.name.split('/')[0] == 'WordWeightAnswer': pass
else: train_vars.append(var)
return train_vars
def filter_transfer_vars(self, all_vars):
transfer_vars = []
for var in all_vars:
if var.name.split('/')[0] == 'q_linear_l':
transfer_vars.append(var)
elif var.name.split('/')[0] == 'pooled_linear_l':
transfer_vars.append(var)
elif var.name.split('/')[0] == 'joint_fc':
transfer_vars.append(var)
return transfer_vars
def build(self):
"""
build network architecture and loss
"""
"""
Visual features
"""
with tf.device('/cpu:0'):
def load_feature(image_idx):
selected_features = np.take(self.features, image_idx, axis=0)
return selected_features
V_ft = tf.py_func(
load_feature, inp=[self.batch['image_idx']], Tout=tf.float32,
name='sample_features')
V_ft.set_shape([None, self.max_box_num, self.vfeat_dim])
num_V_ft = tf.gather(self.num_boxes, self.batch['image_idx'],
name='gather_num_V_ft', axis=0)
self.mid_result['num_V_ft'] = num_V_ft
normal_boxes = tf.gather(self.normal_boxes, self.batch['image_idx'],
name='gather_normal_boxes', axis=0)
self.mid_result['normal_boxes'] = normal_boxes
log.warning('v_linear_v')
v_linear_v = modules.fc_layer(
V_ft, V_DIM, use_bias=True, use_bn=False, use_ln=True,
activation_fn=tf.nn.relu, is_training=self.is_train,
scope='v_linear_v')
"""
Encode question
"""
q_embed = tf.nn.embedding_lookup(self.glove_map, self.batch['q_intseq'])
# [bs, L_DIM]
q_L_ft = modules.encode_L(q_embed, self.batch['q_intseq_len'], L_DIM,
cell_type='GRU')
q_L_mean = modules.fc_layer(
q_L_ft, L_DIM, use_bias=True, use_bn=False, use_ln=False,
activation_fn=None, is_training=self.is_train,
scope='q_L_mean')
q_L_log_sigma_sq = modules.fc_layer(
q_L_ft, L_DIM, use_bias=True, use_bn=False, use_ln=False,
activation_fn=None, is_training=self.is_train,
scope='q_L_log_sigma_sq')
q_L_sigma = tf.sqrt(tf.exp(q_L_log_sigma_sq))
noise = tf.random_normal(tf.shape(q_L_mean), mean=0, stddev=1, seed=123)
q_L_mean_noise = q_L_mean + noise * q_L_sigma
# [bs, V_DIM}
log.warning('q_linear_v')
q_linear_v = modules.fc_layer(
q_L_ft, V_DIM, use_bias=True, use_bn=False, use_ln=True,
activation_fn=tf.nn.relu, is_training=self.is_train,
scope='q_linear_v')
self.mid_result['q_linear_v'] = q_linear_v
"""
Perform attention
"""
att_score = modules.hadamard_attention(
v_linear_v, num_V_ft, q_linear_v,
use_ln=False, is_train=self.is_train)
self.mid_result['att_score'] = att_score
pooled_V_ft = modules.attention_pooling(V_ft, att_score)
self.mid_result['pooled_V_ft'] = pooled_V_ft
"""
Answer classification
"""
log.warning('pooled_linear_l')
pooled_linear_l = modules.fc_layer(
pooled_V_ft, L_DIM, use_bias=True, use_bn=False, use_ln=True,
activation_fn=tf.nn.relu, is_training=self.is_train,
scope='pooled_linear_l')
self.mid_result['pooled_linear_l'] = pooled_linear_l
log.warning('q_linear_l')
l_linear_l = modules.fc_layer(
q_L_mean_noise, L_DIM, use_bias=True, use_bn=False, use_ln=True,
activation_fn=tf.nn.relu, is_training=self.is_train,
scope='q_linear_l')
self.mid_result['l_linear_l'] = l_linear_l
joint = modules.fc_layer(
pooled_linear_l * l_linear_l, L_DIM * 2,
use_bias=True, use_bn=False, use_ln=True,
activation_fn=tf.nn.relu, is_training=self.is_train, scope='joint_fc')
joint = tf.nn.dropout(joint, 0.5)
self.mid_result['joint'] = joint
logit = modules.WordWeightAnswer(
joint, self.answer_dict, self.word_weight_dir,
use_bias=True, is_training=self.is_train, scope='WordWeightAnswer')
self.mid_result['logit'] = logit
"""
Compute loss and accuracy
"""
with tf.name_scope('loss'):
answer_target = self.batch['answer_target']
loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=answer_target, logits=logit)
train_loss = tf.reduce_mean(tf.reduce_sum(
loss * self.train_answer_mask, axis=-1))
report_loss = tf.reduce_mean(tf.reduce_sum(loss, axis=-1))
pred = tf.cast(tf.argmax(logit, axis=-1), dtype=tf.int32)
one_hot_pred = tf.one_hot(pred, depth=self.num_answer,
dtype=tf.float32)
acc = tf.reduce_mean(
tf.reduce_sum(one_hot_pred * answer_target, axis=-1))
exist_acc = tf.reduce_mean(
tf.reduce_sum(one_hot_pred * answer_target * self.answer_exist_mask,
axis=-1))
test_acc = tf.reduce_mean(
tf.reduce_sum(one_hot_pred * answer_target * self.test_answer_mask,
axis=-1))
max_exist_answer_acc = tf.reduce_mean(
tf.reduce_max(answer_target * self.answer_exist_mask, axis=-1))
test_max_answer_acc = tf.reduce_mean(
tf.reduce_max(answer_target * self.test_answer_mask, axis=-1))
test_max_exist_answer_acc = tf.reduce_mean(
tf.reduce_max(answer_target * self.answer_exist_mask *
self.test_answer_mask, axis=-1))
normal_test_acc = tf.where(
tf.equal(test_max_answer_acc, 0),
test_max_answer_acc,
test_acc / test_max_answer_acc)
latent_loss = self.latent_loss(q_L_mean, q_L_log_sigma_sq)
self.mid_result['pred'] = pred
self.losses['answer'] = train_loss
self.losses['latent'] = self.latent_loss_weight * latent_loss
self.report['latent_loss'] = latent_loss
self.report['train_latent_loss'] = self.losses['latent']
self.report['answer_train_loss'] = train_loss
self.report['answer_report_loss'] = report_loss
self.report['answer_accuracy'] = acc
self.report['exist_answer_accuracy'] = exist_acc
self.report['test_answer_accuracy'] = test_acc
self.report['normal_test_answer_accuracy'] = normal_test_acc
self.report['max_exist_answer_accuracy'] = max_exist_answer_acc
self.report['test_max_answer_accuracy'] = test_max_answer_acc
self.report['test_max_exist_answer_accuracy'] = test_max_exist_answer_acc
"""
Prepare image summary
"""
"""
with tf.name_scope('prepare_summary'):
self.vis_image['image_attention_qa'] = self.visualize_vqa_result(
self.batch['image_id'],
self.mid_result['normal_boxes'], self.mid_result['num_V_ft'],
self.mid_result['att_score'],
self.batch['q_intseq'], self.batch['q_intseq_len'],
self.batch['answer_target'], self.mid_result['pred'],
max_batch_num=20, line_width=2)
"""
self.loss = 0
for key, loss in self.losses.items():
self.loss = self.loss + loss
# scalar summary
for key, val in self.report.items():
tf.summary.scalar('train/{}'.format(key), val,
collections=['heavy_train', 'train'])
tf.summary.scalar('val/{}'.format(key), val,
collections=['heavy_val', 'val'])
tf.summary.scalar('testval/{}'.format(key), val,
collections=['heavy_testval', 'testval'])
# image summary
for key, val in self.vis_image.items():
tf.summary.image('train-{}'.format(key), val, max_outputs=10,
collections=['heavy_train'])
tf.summary.image('val-{}'.format(key), val, max_outputs=10,
collections=['heavy_val'])
tf.summary.image('testval-{}'.format(key), val, max_outputs=10,
collections=['heavy_testval'])
return self.loss
def latent_loss(self, z_mu, z_log_sigma_sq):
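        # KL divergence between N(z_mu, sigma^2) and the standard normal prior,
        # averaged over the batch (the usual VAE regularizer).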
latent_loss = tf.reduce_sum(
1 + z_log_sigma_sq - tf.square(z_mu) - tf.exp(z_log_sigma_sq),
axis=-1)
return -0.5 * tf.reduce_mean(latent_loss)
|
[
"[email protected]"
] | |
6043f98f1ed60196e5ff93332d829900c7068f56
|
b2cf4b4900a3a8728c673af47df8fe6b577900ed
|
/experiments/src/ex29/ex29_6_final.py
|
3e32695b484e0ed2e12f1853a57b571fe1622683
|
[] |
no_license
|
gabormakrai/landuseregression
|
7b7b2a2e78de263a5db765c3cf4d827e2023b771
|
ac103485dc846758c2bf75e10d52080e334af61e
|
refs/heads/master
| 2021-01-20T01:13:57.867355 | 2018-09-24T22:19:49 | 2018-09-24T22:19:49 | 42,457,548 | 10 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,669 |
py
|
from ex29.crossvalidation import splitDataForXValidation,\
splitDataForXValidationWithLocation
from data.data import loadData
from sklearn.ensemble.forest import RandomForestRegressor,\
RandomForestClassifier
from eval.rmse import rmseEval
OUTPUT_FILE = "/experiments/ex29/ex29_6_final.csv"
OUTPUT_LOG_FILE = "/experiments/ex29/ex29_6_final.txt"
DATA_FILE = "/data/london3_hour_2016.csv"
groups = [['71.0', '70.0', '38.0', '55.0', '91.0', '73.0', '89.0'],
['5.0', '29.0', '53.0', '67.0', '49.0', '50.0', '44.0'],
['80.0', '81.0', '31.0', '14.0', '78.0', '46.0', '7.0'],
['69.0', '52.0', '26.0', '79.0', '9.0', '16.0', '13.0'],
['15.0', '57.0', '24.0', '19.0', '43.0', '33.0', '51.0']]
all_features = ['building_area', 'building_count', 'natural_area', 'leisure_area', 'landuse_area', 'lane_length', 'length', 'atc', 'winddirection', 'windspeed', 'rain', 'temperature', 'humidity', 'pressure', 'hour', 'day_of_week', 'month', 'bank_holiday']
tw_features = ['winddirection', 'windspeed', 'rain', 'temperature', 'humidity', 'pressure', 'hour', 'day_of_week', 'month', 'bank_holiday']
twa_features = ['winddirection', 'windspeed', 'rain', 'temperature', 'humidity', 'pressure', 'hour', 'day_of_week', 'month', 'bank_holiday', 'atc']
enabled_classification_features = [False, False, True, True, True, False, True, True, False, False, False, False, False, True, True, False, False, True]
def create_model():
return RandomForestRegressor(min_samples_leaf = 29, n_estimators = 64, n_jobs = -1, random_state=42)
def create_classifier_model():
return RandomForestClassifier(max_depth=20, n_estimators = 100, n_jobs = -1, random_state=42)
output_log = open(OUTPUT_LOG_FILE, "w")
output = open(OUTPUT_FILE, "w")
output.write("prediction,observation,station,model\n")
# load the data
data = {}
columns = []
loadData(DATA_FILE, [], data, columns)
def log(line):
output_log.write(line)
output_log.write("\n")
output_log.flush()
print(line)
log("all_features: " + str(all_features))
log("tw_features: " + str(tw_features))
log("twa_features: " + str(twa_features))
classification_features = [all_features[i] for i in range(0, len(all_features)) if enabled_classification_features[i]]
log("classification_features: " + str(classification_features))
def generate_label(observations, tw_predictions, twa_predictions):
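    # Label 0 when the TW prediction is closer to the observation, 1 when the
    # TWA prediction is closer (ties count as TWA).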
label = []
for i in range(0, len(observations)):
tw_abs = abs(observations[i] - tw_predictions[i])
twa_abs = abs(observations[i] - twa_predictions[i])
if tw_abs < twa_abs:
label.append(0)
else:
label.append(1)
return label
def generate_combined_prediction(classifier_prediction, prediction_tw, prediction_twa):
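    # Pick, per sample, the TW or TWA prediction according to the classifier's label.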
pred = []
for i in range(0, len(classifier_prediction)):
if classifier_prediction[i] == 0:
pred.append(prediction_tw[i])
else:
pred.append(prediction_twa[i])
return pred
def generate_train_test_station_list(group, groups):
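    # Hold out one station group for testing and train on the remaining groups.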
train_stations = []
for i in range(0, len(groups)):
if i != group:
train_stations.extend(groups[i])
test_stations = groups[group]
return train_stations, test_stations
all_observations = []
all_pred_ALL = []
all_pred_TW = []
all_pred_TWA = []
all_pred_combined = []
all_test_location = []
for group in range(0, len(groups)):
log("group: " + str(group + 1))
train_stations, test_stations = generate_train_test_station_list(group, groups)
log("\ttrain_stations: " + str(train_stations))
log("\ttest_stations: " + str(test_stations))
train_station_set = set([float(s) for s in train_stations])
test_station_set = set([float(s) for s in test_stations])
trainX, testX, trainY, testY, trainLocation, testLocation = splitDataForXValidationWithLocation(train_station_set, test_station_set, "location", data, all_features, "target")
model = RandomForestRegressor(min_samples_leaf = 29, n_estimators = 64, n_jobs = -1, random_state=42)
model.fit(trainX, trainY)
    prediction_ALL = model.predict(testX)
    rmse = rmseEval(testY, prediction_ALL)[1]
    log("\tALL rmse: " + str(rmse))
    all_observations.extend(testY)
    all_pred_ALL.extend(prediction_ALL)
all_test_location.extend(testLocation)
trainX, testX, trainY, testY, trainLocation, testLocation = splitDataForXValidationWithLocation(train_station_set, test_station_set, "location", data, tw_features, "target")
model = RandomForestRegressor(min_samples_leaf = 29, n_estimators = 64, n_jobs = -1, random_state=42)
model.fit(trainX, trainY)
prediction_TW = model.predict(testX)
rmse = rmseEval(testY, prediction_TW)[1]
log("\tTW rmse: " + str(rmse))
all_pred_TW.extend(prediction_TW)
trainX, testX, trainY, testY, trainLocation, testLocation = splitDataForXValidationWithLocation(train_station_set, test_station_set, "location", data, twa_features, "target")
model = RandomForestRegressor(min_samples_leaf = 29, n_estimators = 64, n_jobs = -1, random_state=42)
model.fit(trainX, trainY)
prediction_TWA = model.predict(testX)
rmse = rmseEval(testY, prediction_TWA)[1]
log("\tTWA rmse: " + str(rmse))
all_pred_TWA.extend(prediction_TWA)
group2s = [groups[i] for i in range(0, len(groups)) if i != group]
log("group2s: " + str(group2s))
#combination
classifier_X = []
classifier_Y = []
for group2 in range(0, len(group2s)):
train_stations, test_stations = generate_train_test_station_list(group2, group2s)
log("\ttrain_stations: " + str(train_stations))
log("\ttest_stations: " + str(test_stations))
train_station_set = set([float(s) for s in train_stations])
test_station_set = set([float(s) for s in test_stations])
trainX, testX, trainY, testY = splitDataForXValidation(train_station_set, test_station_set, "location", data, tw_features, "target")
model = RandomForestRegressor(min_samples_leaf = 29, n_estimators = 64, n_jobs = -1, random_state=42)
model.fit(trainX, trainY)
prediction_3groups_TW = model.predict(testX)
trainX, testX, trainY, testY = splitDataForXValidation(train_station_set, test_station_set, "location", data, twa_features, "target")
model = RandomForestRegressor(min_samples_leaf = 29, n_estimators = 64, n_jobs = -1, random_state=42)
model.fit(trainX, trainY)
prediction_3groups_TWA = model.predict(testX)
trainX, testX, trainY, testY = splitDataForXValidation(train_station_set, test_station_set, "location", data, classification_features, "target")
classifier_X.extend(testX)
label = generate_label(testY, prediction_3groups_TW, prediction_3groups_TWA)
classifier_Y.extend(label)
train_stations, test_stations = generate_train_test_station_list(group, groups)
log("\ttrain_stations: " + str(train_stations))
log("\ttest_stations: " + str(test_stations))
train_station_set = set([float(s) for s in train_stations])
test_station_set = set([float(s) for s in test_stations])
model = create_classifier_model()
model.fit(classifier_X, classifier_Y)
_, testX, _, testY, trainLocation, testLocation = splitDataForXValidationWithLocation(train_station_set, test_station_set, "location", data, classification_features, "target")
classifier_prediction = model.predict(testX)
combined_prediction = generate_combined_prediction(classifier_prediction, prediction_TW, prediction_TWA)
rmse = rmseEval(testY, combined_prediction)[1]
log("\tTW+TWA:" + str(rmse))
all_pred_combined.extend(combined_prediction)
rmse = rmseEval(all_observations, all_pred_ALL)[1]
log("ALL rmse:" + str(rmse))
rmse = rmseEval(all_observations, all_pred_TW)[1]
log("TW rmse:" + str(rmse))
rmse = rmseEval(all_observations, all_pred_TWA)[1]
log("TWA rmse:" + str(rmse))
rmse = rmseEval(all_observations, all_pred_combined)[1]
log("TW+TWA rmse:" + str(rmse))
for i in range(0, len(all_observations)):
obs = all_observations[i]
pred_ALL = all_pred_ALL[i]
pred_TW = all_pred_TW[i]
pred_TWA = all_pred_TWA[i]
pred_combined = all_pred_combined[i]
location = all_test_location[i]
output.write(str(obs) + "," + str(pred_ALL) + ",RFR_ALL," + str(location) + "\n")
output.write(str(obs) + "," + str(pred_TW) + ",RFR_TW," + str(location) + "\n")
output.write(str(obs) + "," + str(pred_TWA) + ",RFR_TWA," + str(location) + "\n")
output.write(str(obs) + "," + str(pred_combined) + ",RFR_combined," + str(location) + "\n")
output_log.close()
output.close()
|
[
"[email protected]"
] | |
43a4b48b9af6e391d2d94d872ba672bbdee47e83
|
bc441bb06b8948288f110af63feda4e798f30225
|
/agent_admin_sdk/model/topology/link_pb2.py
|
57425310345e86fec127525b11152a17ac288f7c
|
[
"Apache-2.0"
] |
permissive
|
easyopsapis/easyops-api-python
|
23204f8846a332c30f5f3ff627bf220940137b6b
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
refs/heads/master
| 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | true | 3,202 |
py
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: link.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from agent_admin_sdk.model.topology import linkStyle_pb2 as agent__admin__sdk_dot_model_dot_topology_dot_linkStyle__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='link.proto',
package='topology',
syntax='proto3',
serialized_options=_b('ZBgo.easyops.local/contracts/protorepo-models/easyops/model/topology'),
serialized_pb=_b('\n\nlink.proto\x12\x08topology\x1a.agent_admin_sdk/model/topology/linkStyle.proto\"J\n\x04Link\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x0e\n\x06target\x18\x02 \x01(\t\x12\"\n\x05style\x18\x03 \x01(\x0b\x32\x13.topology.LinkStyleBDZBgo.easyops.local/contracts/protorepo-models/easyops/model/topologyb\x06proto3')
,
dependencies=[agent__admin__sdk_dot_model_dot_topology_dot_linkStyle__pb2.DESCRIPTOR,])
_LINK = _descriptor.Descriptor(
name='Link',
full_name='topology.Link',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='topology.Link.source', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target', full_name='topology.Link.target', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='style', full_name='topology.Link.style', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=72,
serialized_end=146,
)
_LINK.fields_by_name['style'].message_type = agent__admin__sdk_dot_model_dot_topology_dot_linkStyle__pb2._LINKSTYLE
DESCRIPTOR.message_types_by_name['Link'] = _LINK
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Link = _reflection.GeneratedProtocolMessageType('Link', (_message.Message,), {
'DESCRIPTOR' : _LINK,
'__module__' : 'link_pb2'
# @@protoc_insertion_point(class_scope:topology.Link)
})
_sym_db.RegisterMessage(Link)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"[email protected]"
] | |
9a8385ebac75bfafb7f8a0eded1d52e017c2102c
|
fd281c5c50d31c32ff3724d6cfc9534d8bf65b06
|
/artigos/migrations/0002_auto_20170922_1603.py
|
fcd70e91a35936ae7094e5bb9b521c2d1b548346
|
[] |
no_license
|
thiagorocha06/mairimed-site
|
72ef24cdf0bdc016dac821bb3d8117283a6d9f52
|
5537755ced8c1e4ff8641686acf241b254e50670
|
refs/heads/master
| 2021-01-22T17:48:17.227737 | 2017-10-29T10:39:26 | 2017-10-29T10:39:26 | 100,734,508 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,052 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-09-22 19:03
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('artigos', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='artigo',
name='ef_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='ef_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='ef_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='ef_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top5',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='etio_top6',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top5',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='exames_top6',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top1',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top2',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top3',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top4',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top5',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='med_top6',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='artigo',
name='top1',
field=models.TextField(blank=True, null=True),
),
]
|
[
"[email protected]"
] | |
a4f6c54a5d544fc14b830f131426060871b97721
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/59/usersdata/161/48984/submittedfiles/testes.py
|
aa8ccc2aca043bef467a8eafb70d9d924c95f684
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 169 |
py
|
n = int(input('numero:'))
soma = 0
for i in range(0, n + 1, 1):
    fat = 1
    for a in range(1, i + 1, 1):
        fat = fat * a          # fat == i!  (same value as math.factorial(i))
    soma = soma + (1 / fat)    # partial sum of 1/i!, which approximates e
print(soma)
|
[
"[email protected]"
] | |
6ccd2c5d4edca415ab24a1f3efd1a03ab45a84e7
|
773f6abee91e5368e43b34d8ad179c4ab9056da1
|
/gen/wellknownfiletype.py
|
fb3c550fa4fa8b577314a4ae3c13e8a54548196e
|
[] |
no_license
|
richstoner/aibs
|
3dc9489ee6a1db836d58ec736b13d35a7cffc215
|
bfc7e732b53b4dff55f7c3edccdd0703f4bab25f
|
refs/heads/master
| 2021-01-10T05:11:09.484238 | 2013-03-03T06:19:34 | 2013-03-03T06:19:34 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
# -*- coding: utf-8 -*-
# Rich Stoner, 2013
class WellKnownFileType(object):
    '''aibs.model.wellknownfiletype (autogen)'''
    def __init__(self, initialData={}):
        # Fields
        self.id = 0
        self.name = ''
        # Associations
        self.well_known_files = []  # has_many WellKnownFile
        # override the defaults with any values supplied by the caller
        for k, v in initialData.iteritems():
            setattr(self, k, v)
    # add class methods and private methods here
|
[
"[email protected]"
] | |
1368e722eb797feff1eca7bb87f37bd18411b067
|
150d9e4cee92be00251625b7f9ff231cc8306e9f
|
/RemoveDupLL.py
|
356fd8431d9a51ac2bd0a44d1c699d17ce8499ff
|
[] |
no_license
|
JerinPaulS/Python-Programs
|
0d3724ce277794be597104d9e8f8becb67282cb0
|
d0778178d89d39a93ddb9b95ca18706554eb7655
|
refs/heads/master
| 2022-05-12T02:18:12.599648 | 2022-04-20T18:02:15 | 2022-04-20T18:02:15 | 216,547,245 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,390 |
py
|
'''
Given the head of a sorted linked list, delete all duplicates such that each element appears only once. Return the linked list sorted as well.
Example 1:
Input: head = [1,1,2]
Output: [1,2]
Example 2:
Input: head = [1,1,2,3,3]
Output: [1,2,3]
Constraints:
The number of nodes in the list is in the range [0, 300].
-100 <= Node.val <= 100
The list is guaranteed to be sorted in ascending order.
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
current = head
start = head
if current is None:
return(head)
else:
current = current.next
while current:
if current.val == start.val:
current = current.next
else:
start.next = current
start = start.next
start.next = None
return(head)
def print_l(head):
while head:
print(head.val)
head = head.next
obj = Solution()
head = ListNode(1)
#head.next = ListNode(1)
#head.next.next = ListNode(2)
#head.next.next.next = ListNode(3)
#head.next.next.next.next = ListNode(3)
print_l(obj.deleteDuplicates(head))
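# Illustrative check of Example 2 from the problem statement above (not part of
# the original submission): builds 1->1->2->3->3 and prints the deduplicated list.
example_head = ListNode(1, ListNode(1, ListNode(2, ListNode(3, ListNode(3)))))
print_l(obj.deleteDuplicates(example_head))  # prints 1, 2, 3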
|
[
"[email protected]"
] | |
bd9bf52af07739506da5af2853530d5eb1039364
|
3d65a2d72e65083c752281368cf040ae977e4757
|
/analysis_modules/create_false_odor_packets.py
|
073d56d041990cffb7401e49375a73b6e39bf9ef
|
[] |
no_license
|
florisvb/OdorAnalysis
|
6b4b2c32979b9139856aee20cc63c34cfe63819e
|
18beae8d3c6be271f171b1c36c9fd932a8a404ba
|
refs/heads/master
| 2020-06-03T14:48:34.962795 | 2012-10-23T22:28:21 | 2012-10-23T22:28:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,652 |
py
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import copy
import data_fit
import odor_dataset as od
import fly_plot_lib.plot as fpl
import matplotlib
def play_movie_from_model(gm=None):
fig = plt.figure()
t_start = 13
anim_params = {'t': t_start, 'xlim': [0,1], 'ylim': [-.1,.1], 't_max': 19, 'dt': 0.05, 'resolution': 0.001}
array, extent = gm.get_array_2d(anim_params['t'], anim_params['xlim'], anim_params['ylim'], anim_params['resolution'])
im = plt.imshow( array, cmap=plt.get_cmap('jet'))
def updatefig(*args):
anim_params['t'] += anim_params['dt']
if anim_params['t'] > anim_params['t_max']:
anim_params['t'] = 10
array, extent = gm.get_array_2d(anim_params['t'], anim_params['xlim'], anim_params['ylim'], anim_params['resolution'])
im.set_array(array)
return im,
ani = animation.FuncAnimation(fig, updatefig, anim_params, interval=50, blit=True)
plt.show()
def make_false_odor_trace(gm, timestamps, position):
x = position[0]
y = position[1]
inputs = [timestamps, [x, y]]
trace = gm.get_val(inputs)
odor_trace = od.Odor_Trace(position)
odor_trace.trace = trace
odor_trace.timestamps = timestamps
return odor_trace
def make_false_odor_dataset(gm=None, timestamps=None, positions=None):
if gm is None:
parameters = { 'mean_0_intercept': 0,
'mean_0_slope': .2,
'mean_1_intercept': 0.16,
'mean_1_slope': 0,
'std_0_intercept': 0.2,
'std_0_slope': 0.05,
'std_1_intercept': 0.05,
'std_1_slope': 0.02,
'magnitude': 1,
}
gm = data_fit.models.GaussianModel2D_TimeVarying(parameters=parameters)
if timestamps is None:
t_max = 3
dt = 0.002
timestamps = np.arange(0,t_max,dt)
if positions is None:
if 0:
positions = [[0, .165], [.1, .165], [.2, .165], [.3, .165], [.4, .165], [.5, .165], [.6, .165],
[0, .175], [.1, .175], [.2, .175], [.3, .175], [.4, .175], [.5, .175], [.6, .175],
[0, .135], [.1, .135], [.2, .135], [.3, .135], [.4, .135], [.5, .135], [.6, .135],
[0, .195], [.1, .195], [.2, .195], [.3, .195], [.4, .195], [.5, .195], [.6, .195],
[0, .155], [.1, .155], [.2, .155], [.3, .155], [.4, .155], [.5, .155], [.6, .155]]
if 1:
positions = []
x_pos = np.arange(0,1,.05).tolist()
y_pos = np.arange(0, .33, .05).tolist()
for i, x in enumerate(x_pos):
for j, y in enumerate(y_pos):
positions.append( [x,y] )
print positions[-1]
# convert each position to a numpy array (the original loop only re-bound the
# loop variable, which had no effect)
positions = [np.array(position) for position in positions]
odor_dataset = od.Dataset()
key = 0
for position in positions:
odor_trace = make_false_odor_trace(gm, timestamps, position)
odor_dataset.odor_traces.setdefault(key, odor_trace)
key += 1
return odor_dataset
def fit_1d_gaussian(odor_dataset, t, axis=0, keys=None, plot=True, lims=[-1,1], ignore_parameter_names=[]):
if keys is None:
keys = odor_dataset.odor_traces.keys()
ordinate = []
odor = []
odor_std = []
for key in keys:
odor_trace = odor_dataset.odor_traces[key]
ordinate.append( odor_trace.position[axis] )
index_at_t = np.argmin( np.abs(odor_trace.timestamps - t) )
odor.append( odor_trace.voltage[index_at_t] )
odor_std.append( odor_trace.voltage_std[index_at_t] )
ordinate = np.array(ordinate)
odor = np.array(odor)
odor_std = np.array(odor_std)
print ordinate
print odor
# now fit gaussian to data
gm = data_fit.models.GaussianModel1D()
inputs = [ordinate]
#return odor, ordinate
gm.fit_with_guess(odor, inputs, ignore_parameter_names=ignore_parameter_names)
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(ordinate, odor, 'ob')
for i, pt in enumerate(ordinate):
ax.vlines(pt, odor[i]-odor_std[i], odor[i]+odor_std[i], linewidth=2)
x = np.arange(lims[0], lims[1], 0.001)
vals = gm.get_val(x)
ax.plot(x, vals)
fpl.adjust_spines(ax, ['left', 'bottom'])
ax.set_xlabel('x position, m')
ax.set_ylabel('odor value, ethanol')
ax.set_title('mean and std dev of measured odor values and gaussian fit')
return gm, ordinate, odor, odor_std
def fit_1d_gaussian_time_varying(odor_dataset, tmin=15.2, tmax=18, tres=0.1, num=None, colormap='jet', tres_for_plot=0.5, axis=0, keys=None, lims=None, ignore_parameter_names=[], plot=True):
if lims is None:
if axis==0:
lims = [-.3,1]
elif axis==1:
lims = [-.1,.1]
if keys is None:
sets, keys_to_position = od.find_sets(odor_dataset,axis)
lengths_of_sets = np.asarray([len(s) for s in sets.values()])
set_to_use = np.argmax(lengths_of_sets)
if num is not None:
set_to_use = num
keys = sets[sets.keys()[set_to_use]]
timestamps = np.arange(tmin, tmax, tres)
ordinate = []
odor = []
odor_std = []
for t in timestamps:
odor_data_at_time_t = []
odor_std_data_at_time_t = []
for key in keys:
odor_trace = odor_dataset.odor_traces[key]
odor_at_time_t = np.interp(t, odor_trace.timestamps, odor_trace.voltage)
odor_data_at_time_t.append(odor_at_time_t)
odor_std_at_time_t = np.interp(t, odor_trace.timestamps, odor_trace.voltage_std)
odor_std_data_at_time_t.append(odor_std_at_time_t)
if t == timestamps[0]:
ordinate.append( odor_trace.position[axis] )
odor.append(np.array(odor_data_at_time_t))
odor_std.append(np.array(odor_std_data_at_time_t))
ordinate = np.array(ordinate)
# now fit gaussian to data
gm = data_fit.models.GaussianModel1D_TimeVarying()
inputs = [ordinate]
gm.fit(timestamps, odor, inputs)
if plot:
fig = plt.figure()
ax = fig.add_subplot(111)
norm = matplotlib.colors.Normalize(tmin, tmax)
cmap = matplotlib.cm.ScalarMappable(norm, colormap)
t_arr = np.arange(tmin, tmax, tres_for_plot)
for t in t_arr:
# find index of timestamps array that corresponds to this t
index = np.argmin(np.abs(timestamps-t))
color = cmap.to_rgba(t)
for i, pt in enumerate(ordinate):
ax.vlines(pt, odor[index][i]-odor_std[index][i], odor[index][i]+odor_std[index][i], color='black', linewidth=1)
ax.plot(ordinate, odor[index], 'o', color=color, markersize=8)
x = np.arange(lims[0], lims[1], 0.001)
vals = gm.get_val([t, [x]])
ax.plot(x, vals, color=color, linewidth=2)
fpl.adjust_spines(ax, ['left', 'bottom'])
ax.set_xlabel('x position, m')
ax.set_ylabel('odor value, ethanol')
ax.set_title('mean and std dev of measured odor values and time varying gaussian fit\ncolor=time')
return gm
# fitting routine
# sets, keys_to_positions = od.find_sets(odor_dataset, axis=0, min_keys_in_set=4, threshold=0.004)
#################################################################################################
# 2D Gaussian stuff - doesn't work with the data I've collected
#################################################################################################
def fit_2d_gaussian(odor_dataset, t=13.6, keys=None):
#od.calc_windspeed(odor_dataset, position=[0, 0.16])
if keys is None:
keys = odor_dataset.odor_traces.keys()
# guess for center:
#x0_guess = t*odor_dataset.windspeed + 0
x0_guess = 0
y0_guess = -0.01
x = []
y = []
odor = []
for key in keys:
odor_trace = odor_dataset.odor_traces[key]
x.append( odor_trace.position[0] )
y.append( odor_trace.position[1] )
index_at_t = np.argmin( np.abs(odor_trace.timestamps - t) )
odor.append( odor_trace.voltage[index_at_t] )
x = np.array(x)
y = np.array(y)
odor = np.array(odor)
print x
print y
print odor
# now fit gaussian to data
gm = data_fit.models.GaussianModel2D()
inputs = [x,y]
gm.fit_with_guess(odor, inputs)
fig = plt.figure()
ax = fig.add_subplot(111)
gm.show_fit(odor, inputs, ax=ax, lims=[[-.2,1], [-.1,.1]], resolution=0.001)
ax.set_xlabel('x position, m')
ax.set_ylabel('y position, m')
ax.set_title('Odor heatmap (gaussian fit), one point in time')
return gm
def fit_2d_gaussian_moving(odor_dataset):
t_list = np.arange(0, 2, .1)
gm_list = []
for t in t_list:
gm = fit_2d_gaussian(odor_dataset, t=t, plot=False)
gm_list.append(gm)
mean_0_list = np.zeros_like(t_list)
std_0_list = np.zeros_like(t_list)
mean_1_list = np.zeros_like(t_list)
std_1_list = np.zeros_like(t_list)
magnitude_list = np.zeros_like(t_list)
for i, gm in enumerate(gm_list):
mean_0_list[i] = gm.parameters['mean_0']
std_0_list[i] = gm.parameters['std_0']
mean_1_list[i] = gm.parameters['mean_1']
std_1_list[i] = gm.parameters['std_1']
magnitude_list[i] = gm.parameters['magnitude']
parameter_list = [mean_0_list, std_0_list, mean_1_list, std_1_list, magnitude_list]
lm_list = []
for i, param in enumerate(parameter_list):
lm = data_fit.models.LinearModel(parameters={'slope': 1, 'intercept': 0})
print parameter_list[i]
lm.fit(parameter_list[i], t_list)
print lm.parameters
print
lm_list.append(copy.copy(lm))
return gm_list, parameter_list, lm_list
def fit_2d_gaussian_moving_builtin(odor_dataset):
timestamps = np.arange(13, 16, .01)
def get_positions(odor_dataset):
x = []
y = []
for key, odor_trace in odor_dataset.odor_traces.items():
x.append( odor_trace.position[0] )
y.append( odor_trace.position[1] )
return [np.array(x), np.array(y)]
def get_odor(timestamps, odor_dataset):
odor = []
for t in timestamps:
odor_at_t = []
for key, odor_trace in odor_dataset.odor_traces.items():
index_at_t = np.argmin( np.abs(odor_trace.timestamps - t) )
odor_at_t.append( odor_trace.voltage[index_at_t] )
odor.append(np.array(odor_at_t))
return odor
positions = get_positions(odor_dataset)
odor = get_odor(timestamps, odor_dataset)
gm = data_fit.models.GaussianModel2D_TimeVarying()
gm.fit(timestamps, odor, positions)
return gm
|
[
"[email protected]"
] | |
d8c01001bdd05781a5065833b44d905e0e0a580d
|
c8bdf62ce953b37cd79c9666b669c0aa938fef91
|
/selfdrive/mapd/default_speeds_generator.py
|
59436a7b70a4cd918a47a08d1c6a2a507b086754
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
richardyu-au/openpilot
|
0a7b70e29888494e25013f38680b726114f05421
|
27e0adc434906e37fd7674160a0a8c2dd5c3c180
|
refs/heads/devel
| 2020-04-29T02:03:56.521067 | 2019-03-11T19:13:18 | 2019-03-11T19:13:18 | 175,751,297 | 0 | 0 |
MIT
| 2019-03-15T04:59:35 | 2019-03-15T04:59:34 | null |
UTF-8
|
Python
| false | false | 9,481 |
py
|
#!/usr/bin/env python
import json
OUTPUT_FILENAME = "default_speeds_by_region.json"
def main():
countries = []
"""
--------------------------------------------------
US - United States of America
--------------------------------------------------
"""
US = Country("US") # First step, create the country using the ISO 3166 two letter code
countries.append(US) # Second step, add the country to countries list
""" Default rules """
# Third step, add some default rules for the country
# Speed limit rules are based on OpenStreetMaps (OSM) tags.
# The dictionary {...} defines the tag_name: value
# if a road in OSM has a tag with the name tag_name and this value, the speed limit listed below will be applied.
# The text at the end is the speed limit (use no unit for km/h)
# Rules apply in the order in which they are written for each country
# Rules for specific regions (states) take priority over country rules
# If you modify existing country rules, you must update all existing states without that rule to use the old rule
US.add_rule({"highway": "motorway"}, "65 mph") # On US roads with the tag highway and value motorway, the speed limit will default to 65 mph
US.add_rule({"highway": "trunk"}, "55 mph")
US.add_rule({"highway": "primary"}, "55 mph")
US.add_rule({"highway": "secondary"}, "45 mph")
US.add_rule({"highway": "tertiary"}, "35 mph")
US.add_rule({"highway": "unclassified"}, "55 mph")
US.add_rule({"highway": "residential"}, "25 mph")
US.add_rule({"highway": "service"}, "25 mph")
US.add_rule({"highway": "motorway_link"}, "55 mph")
US.add_rule({"highway": "trunk_link"}, "55 mph")
US.add_rule({"highway": "primary_link"}, "55 mph")
US.add_rule({"highway": "secondary_link"}, "45 mph")
US.add_rule({"highway": "tertiary_link"}, "35 mph")
US.add_rule({"highway": "living_street"}, "15 mph")
""" States """
new_york = US.add_region("New York") # Fourth step, add a state/region to country
new_york.add_rule({"highway": "primary"}, "45 mph") # Fifth step , add rules to the state. See the text above for how to write rules
new_york.add_rule({"highway": "secondary"}, "55 mph")
new_york.add_rule({"highway": "tertiary"}, "55 mph")
new_york.add_rule({"highway": "residential"}, "30 mph")
new_york.add_rule({"highway": "primary_link"}, "45 mph")
new_york.add_rule({"highway": "secondary_link"}, "55 mph")
new_york.add_rule({"highway": "tertiary_link"}, "55 mph")
# If a rule is not written by the state, the rules will default to the country rules
#california = US.add_region("California")
# California uses only the default US rules
michigan = US.add_region("Michigan")
michigan.add_rule({"highway": "motorway"}, "70 mph")
oregon = US.add_region("Oregon")
oregon.add_rule({"highway": "motorway"}, "55 mph")
oregon.add_rule({"highway": "secondary"}, "35 mph")
oregon.add_rule({"highway": "tertiary"}, "30 mph")
oregon.add_rule({"highway": "service"}, "15 mph")
oregon.add_rule({"highway": "secondary_link"}, "35 mph")
oregon.add_rule({"highway": "tertiary_link"}, "30 mph")
south_dakota = US.add_region("South Dakota")
south_dakota.add_rule({"highway": "motorway"}, "80 mph")
south_dakota.add_rule({"highway": "trunk"}, "70 mph")
south_dakota.add_rule({"highway": "primary"}, "65 mph")
south_dakota.add_rule({"highway": "trunk_link"}, "70 mph")
south_dakota.add_rule({"highway": "primary_link"}, "65 mph")
wisconsin = US.add_region("Wisconsin")
wisconsin.add_rule({"highway": "trunk"}, "65 mph")
wisconsin.add_rule({"highway": "tertiary"}, "45 mph")
wisconsin.add_rule({"highway": "unclassified"}, "35 mph")
wisconsin.add_rule({"highway": "trunk_link"}, "65 mph")
wisconsin.add_rule({"highway": "tertiary_link"}, "45 mph")
"""
--------------------------------------------------
AU - Australia
--------------------------------------------------
"""
AU = Country("AU")
countries.append(AU)
""" Default rules """
AU.add_rule({"highway": "motorway"}, "100")
AU.add_rule({"highway": "trunk"}, "80")
AU.add_rule({"highway": "primary"}, "80")
AU.add_rule({"highway": "secondary"}, "50")
AU.add_rule({"highway": "tertiary"}, "50")
AU.add_rule({"highway": "unclassified"}, "80")
AU.add_rule({"highway": "residential"}, "50")
AU.add_rule({"highway": "service"}, "40")
AU.add_rule({"highway": "motorway_link"}, "90")
AU.add_rule({"highway": "trunk_link"}, "80")
AU.add_rule({"highway": "primary_link"}, "80")
AU.add_rule({"highway": "secondary_link"}, "50")
AU.add_rule({"highway": "tertiary_link"}, "50")
AU.add_rule({"highway": "living_street"}, "30")
"""
--------------------------------------------------
CA - Canada
--------------------------------------------------
"""
CA = Country("CA")
countries.append(CA)
""" Default rules """
CA.add_rule({"highway": "motorway"}, "100")
CA.add_rule({"highway": "trunk"}, "80")
CA.add_rule({"highway": "primary"}, "80")
CA.add_rule({"highway": "secondary"}, "50")
CA.add_rule({"highway": "tertiary"}, "50")
CA.add_rule({"highway": "unclassified"}, "80")
CA.add_rule({"highway": "residential"}, "40")
CA.add_rule({"highway": "service"}, "40")
CA.add_rule({"highway": "motorway_link"}, "90")
CA.add_rule({"highway": "trunk_link"}, "80")
CA.add_rule({"highway": "primary_link"}, "80")
CA.add_rule({"highway": "secondary_link"}, "50")
CA.add_rule({"highway": "tertiary_link"}, "50")
CA.add_rule({"highway": "living_street"}, "20")
"""
--------------------------------------------------
DE - Germany
--------------------------------------------------
"""
DE = Country("DE")
countries.append(DE)
""" Default rules """
DE.add_rule({"highway": "motorway"}, "none")
DE.add_rule({"highway": "living_street"}, "10")
DE.add_rule({"zone:traffic": "DE:rural"}, "100")
DE.add_rule({"zone:traffic": "DE:urban"}, "50")
DE.add_rule({"bicycle_road": "yes"}, "30")
""" --- DO NOT MODIFY CODE BELOW THIS LINE --- """
""" --- ADD YOUR COUNTRY OR STATE ABOVE --- """
# Final step
write_json(countries)
def write_json(countries):
out_dict = {}
for country in countries:
out_dict.update(country.jsonify())
json_string = json.dumps(out_dict, indent=2)
with open(OUTPUT_FILENAME, "wb") as f:
f.write(json_string)
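# For reference, write_json() emits a structure shaped like this abridged,
# illustrative excerpt of default_speeds_by_region.json (rules taken from the
# calls in main() above):
# {
#   "US": {
#     "New York": [
#       {"tags": {"highway": "primary"}, "speed": "45 mph"},
#       ...
#     ],
#     "Default": [
#       {"tags": {"highway": "motorway"}, "speed": "65 mph"},
#       ...
#     ]
#   }
# }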
class Region(object):
ALLOWABLE_TAG_KEYS = ["highway", "zone:traffic", "bicycle_road"]
ALLOWABLE_HIGHWAY_TYPES = ["motorway", "trunk", "primary", "secondary", "tertiary", "unclassified", "residential", "service", "motorway_link", "trunk_link", "primary_link", "secondary_link", "tertiary_link", "living_street"]
def __init__(self, name):
self.name = name
self.rules = []
def add_rule(self, tag_conditions, speed):
new_rule = {}
if not isinstance(tag_conditions, dict):
raise TypeError("Rule tag conditions must be dictionary")
if not all(tag_key in self.ALLOWABLE_TAG_KEYS for tag_key in tag_conditions):
raise ValueError("Rule tag keys must be in allowable tag kesy") # If this is by mistake, please update ALLOWABLE_TAG_KEYS
if 'highway' in tag_conditions:
if not tag_conditions['highway'] in self.ALLOWABLE_HIGHWAY_TYPES:
raise ValueError("Invalid Highway type {}".format(tag_conditions["highway"]))
new_rule['tags'] = tag_conditions
try:
new_rule['speed'] = str(speed)
except ValueError:
raise ValueError("Rule speed must be string")
self.rules.append(new_rule)
def jsonify(self):
ret_dict = {}
ret_dict[self.name] = self.rules
return ret_dict
class Country(Region):
ALLOWABLE_COUNTRY_CODES = ["AF","AX","AL","DZ","AS","AD","AO","AI","AQ","AG","AR","AM","AW","AU","AT","AZ","BS","BH","BD","BB","BY","BE","BZ","BJ","BM","BT","BO","BQ","BA","BW","BV","BR","IO","BN","BG","BF","BI","KH","CM","CA","CV","KY","CF","TD","CL","CN","CX","CC","CO","KM","CG","CD","CK","CR","CI","HR","CU","CW","CY","CZ","DK","DJ","DM","DO","EC","EG","SV","GQ","ER","EE","ET","FK","FO","FJ","FI","FR","GF","PF","TF","GA","GM","GE","DE","GH","GI","GR","GL","GD","GP","GU","GT","GG","GN","GW","GY","HT","HM","VA","HN","HK","HU","IS","IN","ID","IR","IQ","IE","IM","IL","IT","JM","JP","JE","JO","KZ","KE","KI","KP","KR","KW","KG","LA","LV","LB","LS","LR","LY","LI","LT","LU","MO","MK","MG","MW","MY","MV","ML","MT","MH","MQ","MR","MU","YT","MX","FM","MD","MC","MN","ME","MS","MA","MZ","MM","NA","NR","NP","NL","NC","NZ","NI","NE","NG","NU","NF","MP","NO","OM","PK","PW","PS","PA","PG","PY","PE","PH","PN","PL","PT","PR","QA","RE","RO","RU","RW","BL","SH","KN","LC","MF","PM","VC","WS","SM","ST","SA","SN","RS","SC","SL","SG","SX","SK","SI","SB","SO","ZA","GS","SS","ES","LK","SD","SR","SJ","SZ","SE","CH","SY","TW","TJ","TZ","TH","TL","TG","TK","TO","TT","TN","TR","TM","TC","TV","UG","UA","AE","GB","US","UM","UY","UZ","VU","VE","VN","VG","VI","WF","EH","YE","ZM","ZW"]
def __init__(self, ISO_3166_alpha_2):
Region.__init__(self, ISO_3166_alpha_2)
if ISO_3166_alpha_2 not in self.ALLOWABLE_COUNTRY_CODES:
raise ValueError("Not valid IOS 3166 country code")
self.regions = {}
def add_region(self, name):
self.regions[name] = Region(name)
return self.regions[name]
def jsonify(self):
ret_dict = {}
ret_dict[self.name] = {}
for r_name, region in self.regions.iteritems():
ret_dict[self.name].update(region.jsonify())
ret_dict[self.name]['Default'] = self.rules
return ret_dict
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
96fbda29f56474fb36c91a465db699b27f1b825b
|
09f0505f3ac1dccaf301c1e363423f38768cc3cc
|
/r_DailyProgrammer/Intermediate/C302/__init__.py
|
5c911bdc5a9c77577af17aa4a5625a99607d6350
|
[] |
no_license
|
Awesome-Austin/PythonPractice
|
02212292b92814016d062f0fec1c990ebde21fe7
|
9a717f91d41122be6393f9fcd1a648c5e62314b3
|
refs/heads/master
| 2023-06-21T11:43:59.366064 | 2021-07-29T23:33:00 | 2021-07-29T23:33:00 | 270,854,302 | 0 | 0 | null | 2020-08-11T20:47:10 | 2020-06-08T23:24:09 |
Python
|
UTF-8
|
Python
| false | false | 71 |
py
|
#! python3
from r_DailyProgrammer.Intermediate.C302.main import main
|
[
"{ID}+{username}@users.noreply.github.com"
] |
{ID}+{username}@users.noreply.github.com
|
2e5aa056829a7e404aebb2f952bfc8b7aa726fe6
|
9b5d0b7d7c9cdaef2851b675292e5eef651ab257
|
/tools/extract/liftOver_wrapper.py
|
6b23580be746f0f57f2fa12ee10ca9983d07cffb
|
[
"CC-BY-2.5",
"MIT"
] |
permissive
|
msGenDev/Yeps-EURAC
|
392fd497a6891a5a22204b236c26dcd133793f21
|
7b679ea17ba294893cc560354d759cfd61c0b450
|
refs/heads/master
| 2021-01-16T21:49:26.499975 | 2010-04-05T17:52:50 | 2010-04-05T17:52:50 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,909 |
py
|
#!/usr/bin/env python
#Guruprasad Ananda
"""
Converts coordinates from one build/assembly to another using liftOver binary and mapping files downloaded from UCSC.
"""
import sys, os, string
import tempfile
import re
assert sys.version_info[:2] >= ( 2, 4 )
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
def safe_bed_file(infile):
"""Make a BED file with track and browser lines ready for liftOver.
liftOver will fail with track or browser lines. We can make it happy
by converting these to comments. See:
https://lists.soe.ucsc.edu/pipermail/genome/2007-May/013561.html
"""
fix_pat = re.compile("^(track|browser)")
(fd, fname) = tempfile.mkstemp()
in_handle = open(infile)
out_handle = open(fname, "w")
for line in in_handle:
if fix_pat.match(line):
line = "#" + line
out_handle.write(line)
in_handle.close()
out_handle.close()
return fname
if len( sys.argv ) != 7:
stop_err( "USAGE: prog input out_file1 out_file2 input_dbkey output_dbkey minMatch" )
infile = sys.argv[1]
outfile1 = sys.argv[2]
outfile2 = sys.argv[3]
in_dbkey = sys.argv[4]
mapfilepath = sys.argv[5]
minMatch = sys.argv[6]
try:
assert float(minMatch)
except:
minMatch = 0.1
#ensure dbkey is set
if in_dbkey == "?":
stop_err( "Input dataset genome build unspecified, click the pencil icon in the history item to specify it." )
if not os.path.isfile( mapfilepath ):
stop_err( "%s mapping is not currently available." % ( mapfilepath.split('/')[-1].split('.')[0] ) )
safe_infile = safe_bed_file(infile)
cmd_line = "liftOver -minMatch=" + str(minMatch) + " " + safe_infile + " " + mapfilepath + " " + outfile1 + " " + outfile2 + " > /dev/null 2>&1"
try:
os.system( cmd_line )
except Exception, exc:
stop_err( "Exception caught attempting conversion: %s" % str( exc ) )
finally:
os.remove(safe_infile)
|
[
"[email protected]"
] | |
017a4d9314cc977d5c80644063b57eaa990b050d
|
b0174911702ab63f7ba0d0ca4cb03ae6453dc182
|
/calas7262/service/interfaces.py
|
9c34585091728f72ed26c481c929d96a16b1d145
|
[
"MIT"
] |
permissive
|
astrorafael/calas7262
|
4001bffdc586b91677095ac4f112170911c93e7c
|
8ff4c0ce5bf670fe0bf6fde218ecd7c993c41d0e
|
refs/heads/master
| 2020-08-01T18:12:39.020668 | 2019-10-28T10:07:18 | 2019-10-28T10:07:18 | 211,072,096 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,426 |
py
|
# ----------------------------------------------------------------------
# Copyright (c) 2014 Rafael Gonzalez.
#
# See the LICENSE file for details
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
from __future__ import division, absolute_import
# ---------------
# Twisted imports
# ---------------
from zope.interface import implementer, Interface
class IPausable(Interface):
"""
A pausable interface for services.
Run pause/resume code at the appropriate times.
@type paused: C{boolean}
@ivar paused: Whether the service is paused.
"""
def pauseService():
"""
Pauses the service. It can take a while, so it returns a Deferred
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished pausing. If pausing is immediate,
a value can be returned (usually, C{None}).
"""
def resumeService():
"""
Resumes the service. It can take a while, so it returns a Deferred
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished shutting down. If shutting down is immediate,
a value can be returned (usually, C{None}).
"""
class IReloadable(Interface):
"""
A reloadable interface for services.
Run reload code at the appropriate times.
"""
def reloadService(config=None):
"""
Reloads the service by reading on the fly its service configuration.
Configuration can be stored in a file (more likely) or a database.
If C{config} is C{None}, then the service must find out what changed
may be reading a configuration file (most likely) or a database.
Otherwise, C{config} is an object or data type meaningful for the
service itself, passed by a containing I{IReloadable} C{MultiService}.
@type config: any meaningful datatype or object.
@rtype: L{Deferred<defer.Deferred>}
@return: a L{Deferred<defer.Deferred>} which is triggered when the
service has finished reloading. If reloading is immediate,
a value can be returned (usually, C{None}).
"""
__all__ = [ "IReloadable", "IPausable" ]
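# --- Illustrative sketch, not part of the original module ---
# Shows how a Twisted service could declare support for IPausable; the class
# name and the immediate defer.succeed() results are assumptions made purely
# for illustration.
from twisted.application import service as _service
from twisted.internet import defer as _defer
@implementer(IPausable)
class _ExamplePausableService(_service.Service):
    paused = False
    def pauseService(self):
        # pausing finishes immediately here, so return an already-fired Deferred
        self.paused = True
        return _defer.succeed(None)
    def resumeService(self):
        self.paused = False
        return _defer.succeed(None)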
|
[
"[email protected]"
] | |
12617fabbf89c88ca061ddde97c6781271a3d367
|
7357d367b0af4650ccc5b783b7a59090fdde47bb
|
/neo/Core/TX/IssueTransaction.py
|
8113d4c98a96ad128375898867a454f12b1368c5
|
[
"MIT"
] |
permissive
|
BarracudaPff/code-golf-data-python
|
fb0cfc74d1777c4246d56a5db8525432bf37ab1a
|
42e8858c2ebc6a061012bcadb167d29cebb85c5e
|
refs/heads/main
| 2023-05-29T05:52:22.856551 | 2020-05-23T22:12:48 | 2020-05-23T22:12:48 | 378,832,634 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,228 |
py
|
"""
Description:
Issue Transaction
Usage:
from neo.Core.TX.IssueTransaction import IssueTransaction
"""
class IssueTransaction(Transaction):
"""docstring for IssueTransaction"""
def __init__(self, *args, **kwargs):
"""
Create an instance.
Args:
*args:
**kwargs:
"""
super(IssueTransaction, self).__init__(*args, **kwargs)
self.Type = TransactionType.IssueTransaction
self.Nonce = None
def SystemFee(self):
"""
Get the system fee.
Returns:
Fixed8:
"""
if self.Version >= 1:
return Fixed8.Zero()
all_neo_gas = True
for output in self.outputs:
if output.AssetId != GetSystemCoin().Hash and output.AssetId != GetSystemShare().Hash:
all_neo_gas = False
if all_neo_gas:
return Fixed8.Zero()
return super(IssueTransaction, self).SystemFee()
def GetScriptHashesForVerifying(self, snapshot):
pass
def DeserializeExclusiveData(self, reader):
"""
Deserialize full object.
Args:
reader (neo.IO.BinaryReader):
"""
self.Type = TransactionType.IssueTransaction
if self.Version > 1:
raise Exception("Invalid TX Type")
def SerializeExclusiveData(self, writer):
pass
|
[
"[email protected]"
] | |
d7d3e50712748c0c8737e836bb75ea879e62ba06
|
f5bfdaccf014b9a986a8d1e58a4655c21b8368ce
|
/send_recv/basic_conn/client.py
|
2e1bae8dfa62e99c7500ac9dd12c31c24b8e853e
|
[] |
no_license
|
wlgud0402/class
|
a6029bb51160cb2ba39dd59b3826532becd61895
|
ae84bfe4bb832d1a5a8434f3a6f78a57da272d62
|
refs/heads/master
| 2022-10-09T18:47:53.165134 | 2020-06-13T07:46:21 | 2020-06-13T07:46:21 | 271,963,794 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 264 |
py
|
import socket
import time
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # ipv4, TCP
client_socket.connect(('127.0.0.1', 3000))
# client_socket.sendall(bytes([234, 185 ,128]))
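# Note: bytes([234, 185, 128]) in the commented line above is the UTF-8
# encoding of '김', the first character of the string sent below, so both
# calls would transmit the same leading bytes.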
client_socket.sendall("김지형".encode())
client_socket.close()
|
[
"[email protected]"
] | |
9341990ffd55f00376c0f6771d2fff7b135601e0
|
76938f270e6165514162856b2ed33c78e3c3bcb5
|
/lib/coginvasion/minigame/CameraShyHeadPanels.py
|
d54c8dccdc92354cc6b09258cd61a99ec93f52ad
|
[] |
no_license
|
coginvasion/src
|
9a5ec682845cc4c9c013fcc35e9b379bd4360b6c
|
2d7fcdb0cd073050250cb51292ee48300a9fe19f
|
refs/heads/master
| 2021-01-19T06:50:11.786112 | 2015-11-08T12:28:52 | 2015-11-08T12:28:52 | 61,545,543 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,548 |
py
|
# Embedded file name: lib.coginvasion.minigame.CameraShyHeadPanels
from panda3d.core import Point3, VBase4
from direct.directnotify.DirectNotifyGlobal import directNotify
from direct.gui.DirectGui import DirectFrame
from lib.coginvasion.toon.ToonHead import ToonHead
from HeadPanels import HeadPanels
class CameraShyHeadPanels(HeadPanels):
notify = directNotify.newCategory('CameraShyHeadPanels')
def __init__(self):
HeadPanels.__init__(self)
self.framePositions = [Point3(0.15, 0, -0.15),
Point3(0.15, 0, -0.43),
Point3(0.15, 0, -0.71),
Point3(0.15, 0, -0.99)]
self.otherPlayerHeadHolderTransforms = {'scale': Point3(2, 1, 0.5),
'pos': Point3(1.03, 0, 0)}
self.otherPlayerHeadXValues = [-0.45, 0, 0.45]
self.state2Color = {0: VBase4(0.05, 0.05, 0.05, 1.0),
1: VBase4(0.5, 0.5, 0.5, 1.0),
2: VBase4(0.75, 0.75, 0.75, 1.0),
3: VBase4(1.0, 1.0, 1.0, 1.0)}
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame = {}
def generate(self, gender, head, headtype, color, doId, name):
HeadPanels.generate(self, gender, head, headtype, color, doId, name, 0)
def generateOtherPlayerGui(self):
for avId in self.doId2Frame.keys():
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame[avId] = {}
headNumber = -1
frame = self.doId2Frame[avId][0]
otherPlayerHeadsFrame = DirectFrame(relief=None, scale=0.85, parent=frame)
otherPlayerHeadsFrame['image'] = frame['image']
otherPlayerHeadsFrame['image_color'] = frame['image_color']
otherPlayerHeadsFrame['image_scale'] = self.otherPlayerHeadHolderTransforms['scale']
otherPlayerHeadsFrame.setPos(self.otherPlayerHeadHolderTransforms['pos'])
otherPlayerHeadsFrame.setBin('gui-popup', 70)
self.frameList.append(otherPlayerHeadsFrame)
for otherAvId in self.doId2Frame.keys():
if otherAvId != avId:
headNumber += 1
otherAv = base.cr.doId2do.get(otherAvId)
gender = otherAv.getGender()
head, color = otherAv.getHeadStyle()
animal = otherAv.getAnimal()
headFrame = otherPlayerHeadsFrame.attachNewNode('otherPlayerHeadFrame')
headFrame.setPosHprScale(self.otherPlayerHeadXValues[headNumber], 5, -0.1, 180, 0, 0, 0.2, 0.2, 0.2)
headFrame.setColorScale(self.state2Color[0])
toon = ToonHead(None)
toon.generateHead(gender, animal, head)
r, g, b, _ = color
color = (r,
g,
b,
1.0)
toon.setHeadColor(color)
toon.setDepthWrite(1)
toon.setDepthTest(1)
toon.reparentTo(headFrame)
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame[avId][otherAvId] = headFrame
return
def updateOtherPlayerHead(self, avId, otherPlayerAvId, state):
frame = self.avId2otherPlayerAvIds2otherPlayerHeadsFrame[avId][otherPlayerAvId]
frame.setColorScale(self.state2Color[state])
def delete(self):
self.otherPlayerHeadHolderTransforms = None
self.otherPlayerHeadXValues = None
self.state2Color = None
self.avId2otherPlayerAvIds2otherPlayerHeadsFrame = None
HeadPanels.delete(self)
return
|
[
"[email protected]"
] | |
0764441710a1e2bc191eba04dedbcb39accb063a
|
502af3505e4e670c507ee6a5dedbc41995cefa09
|
/deep_generative_models/tasks/arae/sample.py
|
8206a693522c003f87c778e649a89ad618c03744
|
[] |
no_license
|
manoj04418/deep-generative-models
|
c1e8062e280ac6d1f3fb8ab359a21e870a2276df
|
402d06773320231d9135c88d8a6033f916a68f89
|
refs/heads/master
| 2022-10-02T10:44:14.857680 | 2020-06-10T14:50:48 | 2020-06-10T14:50:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 505 |
py
|
import argparse
from deep_generative_models.configuration import load_configuration
from deep_generative_models.tasks.gan_with_autoencoder.sample import SampleGANWithAutoEncoder
if __name__ == '__main__':
options_parser = argparse.ArgumentParser(description="Sample from ARAE.")
options_parser.add_argument("configuration", type=str, help="Configuration json file.")
options = options_parser.parse_args()
SampleGANWithAutoEncoder().timed_run(load_configuration(options.configuration))
|
[
"[email protected]"
] | |
8c0f067abeaf7da4af4794f4fd818c33ee8870ef
|
3f7240da3dc81205a0a3bf3428ee4e7ae74fb3a2
|
/src/Week10/Recursion/Recursive Math/recursiveMath.py
|
3971edeec5e2ac01cd6538f02737243d4440daeb
|
[] |
no_license
|
theguyoverthere/CMU15-112-Spring17
|
b4ab8e29c31410b4c68d7b2c696a76b9d85ab4d8
|
b8287092b14e82d2a3aeac6c27bffbc95382eb34
|
refs/heads/master
| 2021-04-27T08:52:45.237631 | 2018-10-02T15:38:18 | 2018-10-02T15:38:18 | 107,882,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,603 |
py
|
# A few example recursive functions.
# Can you figure out what each one does, in general?
import math
def f1(x):
if x == 0: return 0
else: return 1 + f1(x-1) #f(0) = 0, f(1) = 1, f(2) = 2, f(3) = 3
def f2(x):
if x == 0: return 40
else: return 1 + f2(x-1) #f(0) = 40, f(1) = 41, f(2) = 42
def f3(x):
if x == 0: return 0
else: return 2 + f3(x-1) #f(0) = 0, f(1) = 2, f(2) = 4, f(3) = 6
def f4(x):
if x == 0: return 40
else: return 2 + f4(x-1) #f(0) = 40, f(1) = 42, f(2) = 44, f(3) = 46
def f5(x):
if x == 0: return 0 #Triangular Numbers
else: return x + f5(x-1) #f(0) = 0, f(1) = 1, f(2) = 3, f(3) = 6
def f6(x):
if x == 0: return 0
else: return 2*x-1 + f6(x-1) #f(0) = 0, f(1) = 1, f(2)= 4, f(3) = 9
# (x - 1)** 2 = x**2 - 2 * x + 1
# 2 *x - 1 + (x -1) ** 2 = x ** 2
def f7(x):
if x == 0: return 1
else: return 2*f7(x-1) #f(0) = 1, f(1) = 2, f(2) = 4 , f(3) = 8, f(4) = 16
def f8(x):
if x < 2: return 0
else: return 1 + f8(x//2) #f(0) = 0, f(1) = 0, f(2) = 1, f(4) = 2, f(8) = 3
def f9(x):
if x < 2: return 1
else: return f9(x-1) + f9(x-2) #Fibonacci Numbers
def f10(x):
if x == 0: return 1 # Factorials!
else: return x*f10(x-1) #f(0) = 1, f(1) = 1, f(2) = 2, f(3) = 6, f(4) = 24
def f11(x, y):
if y < 0: return -f11(x, -y)
elif y == 0: return 0
else: return x + f11(x, y-1) #f(2,3) = 2 + f(2, 2)
# = 2 + 2 + f(2, 1)
# = 2 + 2 + 2 + f(2, 0)
# = 2 + 2 + 2 + 0
# = 6
def f12(x,y):
if (x < 0) and (y < 0): return f12(-x,-y)
elif (x == 0) or (y == 0): return 0
else: return x+y-1 + f12(x-1, y-1) #Returns product of x and y
# (x - 1)*(y - 1) = x * y - (x + y - 1)
def f13(L):
assert(type(L) == list)
if len(L) < 2: return [ ]
else: return f13(L[2:]) + [L[1]] # [0, 1, 2, 3, 4, 5] ---> [5, 3, 1]
# [2, 3, 4, 5] ---> [5, 3]
# [4, 5] ---> [] + [5] = [5]
# [] ---> []
def go():
while True:
n = input("Enter function # (1-13, or 0 to quit): ")
if n == "0": break
elif n == "11": print("f11(5, 7) ==", f11(5, 7))
elif n == "12": print("f12(5, 7) ==", f12(5, 7))
elif n == "13": print("f13(list(range(20)) ==", f13(list(range(20))))
else:
f = globals()["f"+n]
print("f"+n+": ", [f(x) for x in range(10)])
print()
go()
|
[
"[email protected]"
] | |
2d4e5375e79cc35c8674acf1f09ed3ea017a8102
|
01bf95e0c0d57e3a1392f9d7e20580376c9e39a2
|
/keystone/backends/sqlalchemy/migrate_repo/versions/002_rename_token_table.py
|
1d15d9dac471ffd9d2e3887ea331871420c26b32
|
[
"Apache-2.0"
] |
permissive
|
oubiwann/keystone
|
433713dd5d542484fc754ecfd097dc02759555b2
|
5c70d24462d75256fb6167d58e13d9c0a3d60427
|
refs/heads/master
| 2021-01-15T16:00:31.822891 | 2011-12-02T14:39:59 | 2011-12-02T14:39:59 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 435 |
py
|
"""
Addresses bug 854425
Renames the 'token' table to 'tokens',
in order to appear more consistent with
other table names.
"""
# pylint: disable=C0103
import sqlalchemy
meta = sqlalchemy.MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
sqlalchemy.Table('token', meta).rename('tokens')
def downgrade(migrate_engine):
meta.bind = migrate_engine
sqlalchemy.Table('tokens', meta).rename('token')
|
[
"[email protected]"
] | |
162ca16f1c1766a7e0eba5b50a4d4e47a7f382d6
|
6b09043b97fb379aebd4363ff07d4cc53e8ec0b9
|
/Day 8/08-DailyFlash_Solutions/22_Jan_Solutions_Three/Python/p1.py
|
95966c06b2a0fe2542e4ee03e372c0259eb8dc23
|
[] |
no_license
|
Aadesh-Shigavan/Python_Daily_Flash
|
6a4bdd73a33f533f3b121fae9eef973e10bf3945
|
b118beeca3f4c97de54ae1a610f83da81157009a
|
refs/heads/master
| 2022-11-28T13:03:17.573906 | 2020-08-06T15:36:36 | 2020-08-06T15:36:36 | 276,581,310 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 499 |
py
|
'''
Program 1: Write a program that accepts two integers from user and prints
addition & Subtraction of them.
{Note: checks for the greater number to subtract from while subtracting numbers}
Input: 10 20
Output:
Addition is 30
Subtraction is 10
'''
var1 = int(input("enter first integer "))
var2 = int(input("enter second integer "))
add = var1 + var2
print("Addition is ", add)
if var1 > var2:
    sub = var1 - var2
    print("Subtraction is ", sub)
elif var2 > var1:
    sub = var2 - var1
    print("Subtraction is ", sub)
else:
    # equal inputs: the difference is zero either way
    print("Subtraction is ", 0)
|
[
"[email protected]"
] | |
b04295b6bd02ded41471966b990097969fe52ff6
|
55628a9a08a6b6646b4a8aa74bedbf2e3fd7d850
|
/.history/master_20200108222014.py
|
f0ad1ad6e18e50c3c5af481bb7301cae10b1b643
|
[] |
no_license
|
StRobertCHSCS/final-project-team
|
c115dc11b318f7ac782c94860a8801bb558bd107
|
48907e72813c4dd3b48ff36f794f6fce04533219
|
refs/heads/master
| 2020-12-03T22:35:37.833893 | 2020-01-31T04:05:38 | 2020-01-31T04:05:38 | 231,506,873 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,220 |
py
|
import arcade
import random
# Set how many rows and columns we will have
ROW_COUNT = 29
COLUMN_COUNT = 51
# This sets the WIDTH and HEIGHT of each grid location
WIDTH = 20
HEIGHT = 20
# This sets the margin between each cell
# and on the edges of the screen.
MARGIN = 5
# Do the math to figure out our screen dimensions
SCREEN_WIDTH = (WIDTH + MARGIN) * COLUMN_COUNT + MARGIN
SCREEN_HEIGHT = (HEIGHT + MARGIN) * ROW_COUNT + MARGIN
movedic = {"up": False, "down": False, "left": False, "right": False}
# direction flags read by snake_move(); initialised here so the scheduled
# on_update call can run before the first key press
up = False
down = False
left = False
right = False
player_x_column = 5
player_y_row = 5
texture = arcade.load_texture("griddd.jpg")
grid = []
def on_update(delta_time):
snake_move()
def on_draw():
arcade.start_render()
grid_background()
snake()
apple()
print (movedic)
def grid_background():
arcade.draw_texture_rectangle(SCREEN_WIDTH//2, SCREEN_HEIGHT//2, texture.width, texture.height, texture, 0)
def snake_move():
global player_x, player_y, player_x_column, player_y_row, moveList
if (0 < player_x_column < COLUMN_COUNT) and (0 < player_y_row < ROW_COUNT):
if up:
player_y_row += 1
elif down:
player_y_row -= 1
elif right:
player_x_column += 1
elif left:
player_x_column -= 1
else:
player_x_column = 5
player_y_row = 5
movedic
# Player coordinates
player_x = (MARGIN + WIDTH) * player_x_column + MARGIN + WIDTH // 2
player_y = (MARGIN + HEIGHT) * player_y_row + MARGIN + HEIGHT // 2
def snake():
arcade.draw_rectangle_filled(player_x, player_y, WIDTH, HEIGHT, arcade.color.BLUE)
def apple():
apple_x = random.randint(0, COLUMN_COUNT)
apple_y = random.randint(0, ROW_COUNT)
arcade.draw_rectangle_filled(apple_x, apple_y, WIDTH, HEIGHT, arcade.color.RED)
def wall():
pass
def on_key_press(key, modifiers):
global up, down, left, right
if key == arcade.key.W:
up = True
down = False
right = False
left = False
elif key == arcade.key.S:
down = True
up = False
right = False
left = False
elif key == arcade.key.A:
left = True
up = False
down = False
right = False
elif key == arcade.key.D:
right = True
up = False
down = False
left = False
def on_key_release(key, modifiers):
pass
def on_mouse_press(x, y, button, modifiers):
pass
def setup():
global grid
arcade.open_window(SCREEN_WIDTH, SCREEN_HEIGHT, "snake")
arcade.set_background_color(arcade.color.BLACK)
arcade.schedule(on_update, 1/10)
# Override arcade window methods
window = arcade.get_window()
window.on_draw = on_draw
window.on_key_press = on_key_press
window.on_key_release = on_key_release
window.on_mouse_press = on_mouse_press
# array is simply a list of lists.
for row in range(ROW_COUNT):
# Add an empty array that will hold each cell
# in this row
grid.append([])
for column in range(COLUMN_COUNT):
grid[row].append(0) # Append a cell
arcade.run()
if __name__ == '__main__':
setup()
|
[
"[email protected]"
] | |
038ed0403663029f64d78bc9575373753b2fc331
|
bf28036f99ee0d94ac6c5172659018c5b55fa337
|
/drum.py
|
b4f15451e9124ed7a1e29d1c8559af5b9569f58c
|
[] |
no_license
|
shantinavgurukul/Dictionary_questions
|
1fb2da829675fb8e5ef23b7259e2de29f58ce505
|
ac79ec33901de4414359e48a88cf2cc882d79b5c
|
refs/heads/master
| 2022-12-26T13:15:33.477165 | 2020-10-05T16:21:00 | 2020-10-05T16:21:00 | 301,469,439 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 451 |
py
|
import json
dict1 = {
    "emp1": {
        "name": "Lisa",
        "designation": "programmer",
        "age": "34",
        "salary": "54000"
    },
    "emp2": {
        "name": "Elis",
        "designation": "Trainee",
        "age": "24",
        "salary": "40000"
    },
}
out_file = open("myfile.json", "w")
# a=write(o)
# store=json.dumps(dict1,indent = 6)
# # out_file.close()
# print(store)
# json.dump (not json.dumps) serialises dict1 straight into the open file handle
json.dump(dict1, out_file, indent=4)
out_file.close()
print(out_file)
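# Illustrative follow-up (not in the original exercise): the file written above
# can be read back into a dictionary with json.load.
with open("myfile.json") as in_file:
    loaded = json.load(in_file)
print(loaded["emp1"]["name"])  # -> Lisa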
|
[
"[email protected]"
] | |
f5f87e659a58abd555e1e571e39cf2b5eedc1cd1
|
6fcfb638fa725b6d21083ec54e3609fc1b287d9e
|
/python/weecology_retriever/retriever-master/scripts/prism_climate.py
|
ed1da4bfd28d1a966b678c31c30ee3d3244572a9
|
[] |
no_license
|
LiuFang816/SALSTM_py_data
|
6db258e51858aeff14af38898fef715b46980ac1
|
d494b3041069d377d6a7a9c296a14334f2fa5acc
|
refs/heads/master
| 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 |
Python
|
UTF-8
|
Python
| false | false | 2,745 |
py
|
#retriever
"""Retriever script for direct download of PRISM climate data"""
from future import standard_library
standard_library.install_aliases()
from builtins import range
from retriever.lib.templates import Script
import urllib.request, urllib.parse, urllib.error
class main(Script):
def __init__(self, **kwargs):
Script.__init__(self, **kwargs)
self.name = "PRISM Climate Data"
self.shortname = "prism-climate"
self.retriever_minimum_version = '2.0.dev'
self.version = '1.1.1'
self.ref = "http://prism.oregonstate.edu/"
self.urls = {"climate": "http://services.nacse.org/prism/data/public/4km/"}
self.description = "The PRISM data set represents climate observations from a wide range of monitoring networks, applies sophisticated quality control measures, and develops spatial climate datasets to reveal short- and long-term climate patterns. "
def get_file_names(self, clim_var, mval, year, month):
"""Create a list of all filenames in a given monthly data zip file """
file_extensions = ['bil', 'bil.aux.xml', 'hdr', 'info.txt', 'prj', 'stx', 'xml']
file_names = []
for extension in file_extensions:
file_names.append("PRISM_{}_stable_4km{}_{}{}_bil.{}".format(clim_var,
mval,
year,
month,
extension))
return file_names
def download(self, engine=None, debug=False):
if engine.name != "Download Only":
raise Exception("The PRISM dataset contains only non-tabular data files, and can only be used with the 'download only' engine.")
Script.download(self, engine, debug)
clim_vars = ['ppt', 'tmax', 'tmean', 'tmin']
years = list(range(1981, 2015))
months = ["{:02d}".format(i) for i in range(1,13)]
for clim_var in clim_vars:
mval = "M3" if clim_var == 'ppt' else "M2"
for year in years:
for month in months:
file_names = self.get_file_names(clim_var, mval, year, month)
file_url = urllib.parse.urljoin(self.urls["climate"], "{}/{}{}".format(clim_var, year, month))
archivename = "PRISM_{}_stable_4km{}_{}{}_bil.zip".format(clim_var, mval, year, month)
self.engine.download_files_from_archive(file_url, file_names, archivename=archivename, keep_in_dir=True)
self.engine.register_files(file_names)
SCRIPT = main()
|
[
"[email protected]"
] | |
59fcde378e9247778415b7848b2705ccfe8e3385
|
11a0fab712b139bcba9e90f6acdc7597dff68dbb
|
/mestrado/ppgmcs/m07-elaboracao-de-dissertacao-i/projeto/codigo/teste1/grade/grade.py
|
daaca83d32160a4d36b59cbfca4b0cb9ba952eb3
|
[] |
no_license
|
fapers/MeusTreinamentos
|
17ba096d518df533433ae2528b70d18717f3cf96
|
32a6b791b0c3dbb8b29ffd177597919e768b09b5
|
refs/heads/master
| 2023-06-04T14:00:37.847808 | 2021-06-28T02:37:11 | 2021-06-28T02:37:11 | 292,962,787 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 424 |
py
|
import numpy as np
def newgrade(h, d, t, p):
    grade = np.arange(len(h) * len(d) * len(t)).reshape(len(d), len(h), len(t))
    for i in range(len(d)):
        for j in range(len(h)):
            for k in range(len(t)):
                aleatorio = np.random.choice(p)
                grade[i][j][k] = int(aleatorio)
    return grade
def grade(t, p):
    # Reconstructed from the original broken line (np.arrange and unbalanced
    # parentheses); by analogy with newgrade(), the assumed intent is a grid
    # of shape (len(t), sum(p)).
    grade = np.arange(len(t) * np.sum(p)).reshape(len(t), np.sum(p))
    return grade
|
[
"[email protected]"
] | |
05e423199bcd4237ba3acf5f863237c356aa85d7
|
95289559871f328cbed764cee33f85994599ef1f
|
/my_package/SeparableConvFlow/SeparableConvFlowLayer.py
|
3428875de42fdbb72f8502bd817f5a3fa7a06b03
|
[
"MIT"
] |
permissive
|
Bostwickenator/Dain-App
|
10a6f725e8c82b2a9a4f59060521d675f5b63e40
|
27a9dc83f36b549129a1815a095da9c782c8752e
|
refs/heads/master
| 2023-03-18T16:41:31.839901 | 2021-03-20T20:00:53 | 2021-03-20T20:00:53 | 349,823,152 | 0 | 0 |
MIT
| 2021-03-20T20:59:25 | 2021-03-20T19:58:53 |
Python
|
UTF-8
|
Python
| false | false | 4,368 |
py
|
# this is for wrapping the customized layer
import torch
from torch.autograd import Function
import separableconvflow_cuda as my_lib
import warnings
#Please check how the STN FUNCTION is written :
#https://github.com/fxia22/stn.pytorch/blob/master/script/functions/gridgen.py
#https://github.com/fxia22/stn.pytorch/blob/master/script/functions/stn.py
class SeparableConvFlowLayer(Function):
def __init__(self,filtersize):
self.filtersize = filtersize
warnings.warn("\nSeparable Conv Flow Layer is not precise enough for optical flow due to a divison operation")
super(SeparableConvFlowLayer,self).__init__()
def forward(self, input1,input2,input3):
intBatches = input1.size(0)
intInputDepth = input1.size(1)
intInputHeight = input1.size(2)
intInputWidth = input1.size(3)
intFilterSize = min(input2.size(1), input3.size(1))
intOutputHeight = min(input2.size(2), input3.size(2))
intOutputWidth = min(input2.size(3), input3.size(3))
assert(intInputHeight - self.filtersize == intOutputHeight - 1)
assert(intInputWidth - self.filtersize == intOutputWidth - 1)
assert(intFilterSize == self.filtersize)
assert(input1.is_contiguous() == True)
assert(input2.is_contiguous() == True)
assert(input3.is_contiguous() == True)
# output = input1.new().resize_(intBatches, intInputDepth, intOutputHeight, intOutputWidth).zero_()
flow_ouput = torch.zeros(intBatches, 2,intOutputHeight, intOutputWidth) # as a byproduct of SepConv, but no
# assert(input1.is_contiguous())
# assert(input2.is_contiguous())
self.input1 = input1.contiguous() # need to use in the backward process, so we need to cache it
self.input2 = input2.contiguous() # TODO: Note that this is simply a shallow copy?
self.input3 = input3.contiguous()
if input1.is_cuda:
self.device = torch.cuda.current_device()
else:
self.device = -1
if input1.is_cuda :
# output = output.cuda()
flow_ouput = flow_ouput.cuda()
err = my_lib.SeparableConvFlowLayer_gpu_forward(input1, input2,input3,flow_ouput)
else:
# output = torch.cuda.FloatTensor(input1.data.size())
err = my_lib.SeparableConvFlowLayer_cpu_forward(input1, input2,input3,flow_ouput)
if err != 0:
print(err)
# the function returns the output to its caller
return flow_ouput
#TODO: if there are multiple outputs of this function, then the order should be well considered?
def backward(self, gradoutput):
# print("Backward of Interpolation Layer")
# gradinput1 = input1.new().zero_()
# gradinput2 = input2.new().zero_()
gradinput1 = torch.zeros(self.input1.size()) # the input1 has zero gradient because flow backprop. nothing to gradinput1
gradinput2 = torch.zeros(self.input2.size())
gradinput3 = torch.zeros(self.input3.size())
if self.input1.is_cuda:
# print("CUDA backward")
gradinput1 = gradinput1.cuda(self.device)
gradinput2 = gradinput2.cuda(self.device)
gradinput3 = gradinput3.cuda(self.device)
# the input1 image should not require any gradients
# print("Does input1 requires gradients? " + str(self.input1.requires_grad))
# err = my_lib.SeparableConvFlowLayer_gpu_backward(self.input1,self.input2,self.input3, gradoutput,gradinput1,gradinput2,gradinput3)
err = my_lib.SeparableConvFlowLayer_gpu_backward(self.input1,self.input2,self.input3, gradoutput,gradinput1,gradinput2,gradinput3)
if err != 0 :
print(err)
else:
# print("CPU backward")
# print(gradoutput)
# print(err)
# err = my_lib.SeparableConvFlowLayer_cpu_backward(self.input1, self.input2, self.input3, gradoutput, gradinput1, gradinput2, gradinput3)
err = my_lib.SeparableConvFlowLayer_cpu_backward(self.input1, self.input2, self.input3, gradoutput, gradinput1, gradinput2, gradinput3)
if err != 0 :
print(err)
# print(gradinput1)
# print(gradinput2)
# print(gradinput1)
return gradinput1, gradinput2,gradinput3
|
[
"[email protected]"
] | |
87c07543dd40fb4839f8bd146fa7eb9bd2b4adca
|
0fefd630aa4b500a1a218f5f12d351dfeb79d4a7
|
/Class-HomeWork/03.RectanglePosition.py
|
8253241100608d02637c7cb68e4136c21fa3125e
|
[
"MIT"
] |
permissive
|
bozhikovstanislav/Python-Fundamentals
|
a7e7659d7ce8996f9e5dc17a8a0c5fcd5fbab65f
|
072fd2c8bc962d20d4c526947349fdeae0bc94a5
|
refs/heads/master
| 2020-04-15T00:22:02.395202 | 2019-03-10T15:46:48 | 2019-03-10T15:46:48 | 164,237,382 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,532 |
py
|
def is_inside(rect_a, rect_b):
b_left = rect_a.get_x() >= rect_b.get_x()
b_top = rect_a.get_y() <= rect_b.get_y()
b_get_right_ = rect_a.get_x1() <= rect_b.get_x1()
get_bottum_ = rect_a.get_y1() <= rect_b.get_y1()
if b_left and b_get_right_ and b_top and get_bottum_:
return 'Inside'
return 'Not inside'
class Rectungle:
def __init__(self, x, y, width, height):
self.__x = x
self.__y = y
self.__width = width
self.__height = height
def set_x(self, x):
if isinstance(x, int):
return x
def get_x(self):
return self.__x
def set_y(self, y):
if isinstance(y, int):
return y
def get_y(self):
return self.__y
def set_width(self, width):
if isinstance(width, int):
return width
def get_width(self):
return self.__width
def set_height(self, height):
if isinstance(height, int):
return height
def get_height(self):
return self.__height
def get_x1(self):
h = self.get_x() + abs(self.get_width())
return h
def get_y1(self):
return self.get_y() + self.get_height()
rectungle_one = list(map(int, input().split()))
rectungle_tow = list(map(int, input().split()))
rect_one = Rectungle(rectungle_one[0], rectungle_one[1], rectungle_one[2], rectungle_one[3])
rect_tow = Rectungle(rectungle_tow[0], rectungle_tow[1], rectungle_tow[2], rectungle_tow[3])
print(is_inside(rect_one, rect_tow))
|
[
"[email protected]"
] | |
b5cffee6d892f73cbea112ed9209626b511c5b1e
|
7c1be5665bf193281a90ba44ce0c7fe2215c2630
|
/拼多多/pin_04.py
|
a9eb15ca196e2c0e424ceed3f119dcf366c71c46
|
[] |
no_license
|
TechInTech/Interview_Codes
|
47a8748ff0b70b37949034926fdc01ec1f912584
|
24145a34de7a80b8dd7379914ab27e0017541b25
|
refs/heads/master
| 2020-07-08T22:37:27.952537 | 2019-10-14T05:42:00 | 2019-10-14T05:42:00 | 203,798,729 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,407 |
py
|
# -*- coding:utf-8 -*-
import sys
if __name__ == '__main__':
str_input = sys.stdin.readline().strip().split(' ')
sub_str = []
for i in str_input:
if len(i) > 1:
sub_str.append(i[0] + i[-1])
else:
sub_str.append(i[0])
# print(sub_str)
str_dict = {}
flag = True
while flag:
for item in sub_str:
if len(item) == 1:
if item in str_dict.values():
key = list(str_dict.keys())[list(str_dict.values()).index(item)]
str_dict[key] = item
else:
str_dict[item] = item
else:
if item[0] in str_dict.values():
key = list(str_dict.keys())[list(str_dict.values()).index(item[0])]
str_dict[key] = item[-1]
else:
str_dict[item[0]] = item[-1]
list_dict = []
for k1, it1 in str_dict.items():
list_dict.extend([k1, it1])
if len(str_dict) == 1 or len(set(list_dict)) >= 3:
flag = False
else:
sub_str = []
for k, it in str_dict.items():
sub_str.append(k + it)
str_dict = {}
if len(str_dict) > 1 or (len(str_dict) == 1 and list(str_dict.keys())[0] != list(str_dict.values())[0]):
print(False)
else:
print(True)
|
[
"[email protected]"
] | |
56c83d148ef48487ce438c5eb9f69b92baa0f3bb
|
6b1dd40d16ae6169e7ed780c5062e88d10502c85
|
/Kaggle/Playgroud/RiskPrediction/Home-Credit-Default-Risk-master/py/trash/922_predict_829-1.py
|
13011603b4e56baf37e707fbd78dcb77ebcf9844
|
[
"MIT"
] |
permissive
|
hehuanlin123/DeepLearning
|
8a59680a341cfc525d50aa5afc3e44202ca4acc4
|
6b7feabbbde9ac9489f76da4c06eeb6703fb165a
|
refs/heads/master
| 2022-07-12T09:26:08.617883 | 2019-06-10T11:31:37 | 2019-06-10T11:31:37 | 183,748,407 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,733 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 29 01:21:16 2018
@author: Kazuki
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
import gc, os
import sys
argv = sys.argv
sys.path.append(f'/home/{os.environ.get("USER")}/PythonLibrary')
import lgbextension as ex
import lightgbm as lgb
from multiprocessing import cpu_count
from sklearn.model_selection import GroupKFold
from sklearn.metrics import roc_auc_score
from glob import glob
import utils, utils_cat
utils.start(__file__)
#==============================================================================
SEED = 71
LOOP = 5
NFOLD = 7
SUBMIT_FILE_PATH = '../output/829-1.csv.gz'
HEAD = 600
EXE_SUBMIT = False
COMMENT = 'CV(single): 0.806418 + 0.002499 600features'
param = {
'objective': 'binary',
'metric': 'auc',
'learning_rate': 0.01,
'max_depth': 6,
'num_leaves': 63,
'max_bin': 255,
'min_child_weight': 10,
'min_data_in_leaf': 150,
'reg_lambda': 0.5, # L2 regularization term on weights.
'reg_alpha': 0.5, # L1 regularization term on weights.
'colsample_bytree': 0.9,
'subsample': 0.9,
# 'nthread': 32,
'nthread': cpu_count(),
'bagging_freq': 1,
'verbose':-1,
# 'seed': SEED
}
np.random.seed(SEED)
#==============================================================================
imp = pd.read_csv('LOG/imp_801_imp_lgb.py-2.csv')
imp.sort_values('total', ascending=False, inplace=True)
def mk_submit(HEAD=HEAD):
features = imp.head(HEAD).feature
files_tr = ('../feature/train_' + features + '.f').tolist()
files_te = ('../feature/test_' + features + '.f').tolist()
# =============================================================================
# load
# =============================================================================
# train
X_train = pd.concat([
pd.read_feather(f) for f in tqdm(files_tr, mininterval=60)
], axis=1)
y_train = utils.read_pickles('../data/label').TARGET
X_train.head().to_csv(SUBMIT_FILE_PATH.replace('.csv', '_X.csv'),
index=False, compression='gzip')
if X_train.columns.duplicated().sum()>0:
raise Exception(f'duplicated!: { X_train.columns[X_train.columns.duplicated()] }')
print('no dup :) ')
print(f'X_train.shape {X_train.shape}')
gc.collect()
CAT = list( set(X_train.columns) & set(utils_cat.ALL) )
COL = X_train.columns.tolist()
# test
X_test = pd.concat([
pd.read_feather(f) for f in tqdm(files_te, mininterval=60)
], axis=1)[COL]
# =============================================================================
# groupKfold
# =============================================================================
sk_tbl = pd.read_csv('../data/user_id_v7.csv.gz') # TODO: check
user_tbl = sk_tbl.user_id.drop_duplicates().reset_index(drop=True).to_frame()
sub_train = pd.read_csv('../input/application_train.csv.zip', usecols=['SK_ID_CURR']).set_index('SK_ID_CURR')
sub_train['y'] = y_train.values
group_kfold = GroupKFold(n_splits=NFOLD)
# =============================================================================
# training with cv
# =============================================================================
model_all = []
auc_mean = 0
for i in range(LOOP):
dtrain = lgb.Dataset(X_train, y_train, categorical_feature=CAT, free_raw_data=False)
# shuffle fold
ids = list(range(user_tbl.shape[0]))
np.random.shuffle(ids)
user_tbl['g'] = np.array(ids) % NFOLD
sk_tbl_ = pd.merge(sk_tbl, user_tbl, on='user_id', how='left').set_index('SK_ID_CURR')
sub_train['g'] = sk_tbl_.g
folds = group_kfold.split(X_train, sub_train['y'], sub_train['g'])
gc.collect()
param['seed'] = i
ret, models = lgb.cv(param, dtrain, 9999, folds=folds,
early_stopping_rounds=100, verbose_eval=50,
seed=i)
model_all += models
auc_mean += ret['auc-mean'][-1]
auc_mean /= LOOP
result = f"CV auc-mean(feature {HEAD}): {auc_mean}"
print(result)
utils.send_line(result)
# =============================================================================
# predict
# =============================================================================
sub = pd.read_pickle('../data/sub.p')
gc.collect()
label_name = 'TARGET'
sub[label_name] = 0
for model in model_all:
y_pred = model.predict(X_test)
sub[label_name] += pd.Series(y_pred).rank()
sub[label_name] /= len(model_all)
sub[label_name] /= sub[label_name].max()
sub['SK_ID_CURR'] = sub['SK_ID_CURR'].map(int)
sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')
# =============================================================================
# submission
# =============================================================================
if EXE_SUBMIT:
print('submit')
utils.submit(SUBMIT_FILE_PATH, COMMENT)
# =============================================================================
# main
# =============================================================================
mk_submit(HEAD)
#==============================================================================
utils.end(__file__)
utils.stop_instance()
|
[
"[email protected]"
] | |
91564fe0e139d2f5aacf5af9c5d65c3f535552ce
|
310acdc816471356063121a966f6243eb5ee4bc3
|
/timeserio/validation/__init__.py
|
36dcf7d16029512b047b9ccbfc7cf74dd88a9d1a
|
[
"MIT"
] |
permissive
|
octoenergy/timeserio
|
e3d1b79d7425448346bc52ef2ae8c6daf625d368
|
fbf99a05e420e684a941dd8223d07183644ced2d
|
refs/heads/master
| 2023-04-06T22:19:29.835755 | 2022-10-10T08:33:55 | 2022-10-10T08:33:55 | 196,452,519 | 69 | 12 |
MIT
| 2023-03-25T02:06:46 | 2019-07-11T19:18:04 |
Python
|
UTF-8
|
Python
| false | false | 69 |
py
|
from .validation import is_valid_multimodel, is_valid_pickle # noqa
|
[
"[email protected]"
] | |
ba1831997efc65fdd8de32d918565cd280a23b1f
|
a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea
|
/airflow/decorators/python.py
|
3f00681ccfdde818a19511335dab7fefa7db6aa4
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
ishiis/airflow
|
4305794e36b611d01f49e3f2401be3dc49782670
|
292440d54f4db84aaf0c5a98cf5fcf34303f2fa8
|
refs/heads/master
| 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 |
Apache-2.0
| 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null |
UTF-8
|
Python
| false | false | 3,205 |
py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Callable, Optional, Sequence
from airflow.decorators.base import DecoratedOperator, TaskDecorator, task_decorator_factory
from airflow.operators.python import PythonOperator
class _PythonDecoratedOperator(DecoratedOperator, PythonOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
template_fields: Sequence[str] = ('op_args', 'op_kwargs')
template_fields_renderers = {"op_args": "py", "op_kwargs": "py"}
# since we won't mutate the arguments, we should just do the shallow copy
# there are some cases we can't deepcopy the objects (e.g protobuf).
shallow_copy_attrs: Sequence[str] = ('python_callable',)
def __init__(self, *, python_callable, op_args, op_kwargs, **kwargs) -> None:
kwargs_to_upstream = {
"python_callable": python_callable,
"op_args": op_args,
"op_kwargs": op_kwargs,
}
super().__init__(
kwargs_to_upstream=kwargs_to_upstream,
python_callable=python_callable,
op_args=op_args,
op_kwargs=op_kwargs,
**kwargs,
)
def python_task(
python_callable: Optional[Callable] = None,
multiple_outputs: Optional[bool] = None,
**kwargs,
) -> TaskDecorator:
"""Wraps a function into an Airflow operator.
Accepts kwargs for operator kwarg. Can be reused in a single DAG.
:param python_callable: Function to decorate
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
"""
return task_decorator_factory(
python_callable=python_callable,
multiple_outputs=multiple_outputs,
decorated_operator_class=_PythonDecoratedOperator,
**kwargs,
)
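# Illustrative usage sketch (not part of this module): in a DAG file, the operator
# produced by this decorator is normally reached through the TaskFlow API, e.g. via
# ``@task`` / ``@task.python`` from ``airflow.decorators``. The DAG and task names
# below are hypothetical.
#
#     import pendulum
#     from airflow.decorators import dag, task
#
#     @dag(start_date=pendulum.datetime(2022, 1, 1), catchup=False)
#     def example_taskflow_dag():
#         @task.python(multiple_outputs=True)
#         def make_pair():
#             return {"x": 1, "y": 2}
#         make_pair()
#
#     example_taskflow_dag()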
|
[
"[email protected]"
] | |
de8f80151a4960a8f4e5d28c3ea758062ee104bf
|
e8c3e7964f4b448e94481704d29508e9d6bd1798
|
/CommonTools/python/HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1.py
|
a08f9f4a69a9ca55ba3d7591cf98e7a06773d704
|
[] |
no_license
|
senka/ZZ_2l2nu_4l_CMS_combination
|
1401f81dc255ea0ae4a0a5c73b022670849a1152
|
197655fa2143ffe1665cd7a1c6e5af2a2f48e57a
|
refs/heads/master
| 2021-01-13T02:06:27.885996 | 2014-08-09T16:15:14 | 2014-08-09T16:15:14 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,872 |
py
|
from CombinedEWKAnalysis.CommonTools.AnomalousCouplingModel import *
import ROOT as r
import os
basepath = '%s/src/CombinedEWKAnalysis/CommonTools/data/WV_semileptonic'%os.environ['CMSSW_BASE']
#filename = '%s/ATGC_shape_coefficients.root'%basepath
#filename = '%s/signal_WV.root'%basepath
#this model is in the equal couplings scenario of HISZ or something similar
#it does the old style limits of setting the other parameter to zero
class HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1(AnomalousCouplingModel):
def __init__(self,mode):
AnomalousCouplingModel.__init__(self)
self.processes = ['WWgammaZ']
self.channels = ['WV_atgc_semileptonic']
# self.lepchannels = ['ch1','ch2','ch3','ch4']
self.lepchannels = ['ch1','ch2','ch3','ch4','ch5','ch6']
# self.lepchannels = ['ch1','ch2']
self.pois = ['dkg','dg1','lZ']
self.mode = mode
self.anomCoupSearchWindows = {'dkg':['-0.006','0.006'],
'dg1':['-0.006','0.006'],
'lZ' :['-0.006','0.006'] }
# self.anomCoupSearchWindows = {'dkg':['-0.015','0.015'],
# 'dg1':['-0.015','0.015'],
# 'lZ' :['-0.015','0.015'] }
self.verbose = False
def buildScaling(self,process,channel,lepchannel):
scalerName = '%s_%s_%s'%(process,channel,lepchannel)
print '\t\t *********************** Hagiwara reading: %s/signal_WV_%s_f5z_ifLessThen1SetTo1.root'%(basepath,lepchannel)
filename = '%s/signal_WV_%s_f5z_ifLessThen1SetTo1_0505Files.root'%(basepath,lepchannel)
# f = r.TFile('%s/mu_boosted.root'%basepath,'READ')
print '\t\t *********************** Hagiwara reading: %s/%s_boosted.root'%(basepath,lepchannel)
f = r.TFile('%s/%s_boosted.root'%(basepath,lepchannel),'READ')
# SM_diboson_shape = f.Get('diboson').Clone('SM_wv_semil_mu_shape_for_scale')
if ('ch' in lepchannel):
print 'reading ZZ2l2nu for %s'%lepchannel
SM_diboson_shape = f.Get('zz2l2nu').Clone('SM_wv_semil_%s_shape_for_scale'%lepchannel)
else:
print 'reading diboson %s'%lepchannel
SM_diboson_shape = f.Get('diboson').Clone('SM_wv_semil_%s_shape_for_scale'%lepchannel)
SM_diboson_shape.SetDirectory(0)
f.Close()
self.modelBuilder.out._import(SM_diboson_shape)
SM_diboson_shape_dhist = r.RooDataHist('DHIST_SM_wv_semil_%s_shape_for_scale'%lepchannel,
'DHIST_SM_wv_semil_%s_shape_for_scale'%lepchannel,
r.RooArgList(self.modelBuilder.out.var('W_pt_%s'%lepchannel)),
self.modelBuilder.out.obj('SM_wv_semil_%s_shape_for_scale'%lepchannel))
self.modelBuilder.out._import(SM_diboson_shape_dhist)
# self.modelBuilder.factory_('RooHistFunc::Scaling_base_pdf_%s({W_pt},DHIST_SM_wv_semil_mu_shape_for_scale)'%(scalerName))
self.modelBuilder.factory_('RooHistFunc::Scaling_base_pdf_%s({W_pt_%s},DHIST_SM_wv_semil_%s_shape_for_scale)'%(scalerName,lepchannel,lepchannel))
self.modelBuilder.factory_('RooATGCProcessScaling_wz::Scaling_%s(W_pt_%s,dkg,lZ,dg1,Scaling_base_pdf_%s,"%s")'%(scalerName,lepchannel,scalerName,filename))
if ( self.mode == 'dkglZ' ):
self.modelBuilder.out.function('Scaling_%s'%scalerName).setLimitType(0)
self.modelBuilder.out.var('dg1').setVal(0)
self.modelBuilder.out.var('dg1').setConstant(True)
elif ( self.mode == 'dg1lZ' ):
self.modelBuilder.out.function('Scaling_%s'%scalerName).setLimitType(1)
self.modelBuilder.out.var('dkg').setVal(0)
self.modelBuilder.out.var('dkg').setConstant(True)
elif ( self.mode == 'dkgdg1' ):
self.modelBuilder.out.function('Scaling_%s'%scalerName).setLimitType(2)
self.modelBuilder.out.var('lZ').setVal(0)
self.modelBuilder.out.var('lZ').setConstant(True)
else:
raise RuntimeError('InvalidCouplingChoice',
'We can only use [dkg,lZ], [dg1,lZ], and [dkg,dg1]'\
' as POIs right now!')
return scalerName
dkglZModel_wz_f5z_ifLessThen1SetTo1_0505Files_1SetTo1 = HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1('dkglZ')
dg1lZModel_wz_f5z_ifLessThen1SetTo1_0505Files_1SetTo1 = HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1('dg1lZ')
dkgdg1Model_wz_f5z_ifLessThen1SetTo1_0505Files_1SetTo1 = HagiwaraAndZeppenfeldTwoDimensionalModel_wz_f5z_ifLessThen0SetTo0_0505Files_1SetTo1('dkgdg1')
|
[
"[email protected]"
] | |
333db2238260b5ce45d4d105fa1e5cac5933855d
|
dfc686228834750216b2cd6eea14d2a6d12422e4
|
/Hackerrank_Python_solution/RegexandParsing/DetectHTMLTagsAttributesandAttributeValues.py
|
920d82f47e5c38a13364c5af8b4e6a11668ec42b
|
[] |
no_license
|
Parth-Ps/python
|
8466e8856bf301908544eb60ae4a68338ccf4550
|
bb448c2a7996d17883214fe8eb11caa61e211400
|
refs/heads/master
| 2023-01-22T13:30:50.507021 | 2020-12-02T07:59:53 | 2020-12-02T07:59:53 | 317,788,331 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 770 |
py
|
'''
Title : Detect HTML Tags, Attributes and Attribute Values
Subdomain : Regex and Parsing
Domain : Python
Author : Ahmedur Rahman Shovon
Created : 15 July 2016
'''
from html.parser import HTMLParser
class CustomHTMLParser(HTMLParser):
def handle_starttag(self,tag,attrs):
print(tag)
self.handle_attrs(attrs)
def handle_startendtag(self,tag,attrs):
print(tag)
self.handle_attrs(attrs)
def handle_attrs(self,attrs):
for attrs_pair in attrs:
print('->',attrs_pair[0].strip(),'>',attrs_pair[1].strip())
n = int(input())
html_string = ''
for i in range(n):
html_string += input()
customHTMLParser = CustomHTMLParser()
customHTMLParser.feed(html_string)
customHTMLParser.close()
|
[
"[email protected]"
] | |
92d7a6472e931edc858825d8e9d035a8f6ac359a
|
772936057748d5cfb7fc8a4d521cfc223ebdd6f3
|
/Insertion Sort/Insertion Sort.py
|
f5d104199fb7aec3f24326b91e1e77c210f27b04
|
[] |
no_license
|
xCE3/ChiCodesPython
|
57e48b0b2b4fb355628a08dbe605d3b597513183
|
a5e2600b66b16deee331804030add5eb47e1295f
|
refs/heads/master
| 2020-06-13T16:33:13.677896 | 2019-07-31T16:00:52 | 2019-07-31T16:00:52 | 194,712,372 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 249 |
py
|
def insertion_sort(arr):
for i in range(1, len(arr)):
for j in range(i-1, -1, -1):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
return arr
print(insertion_sort([2,8,5,3,10,9,-2,21,9]))
|
[
"[email protected]"
] | |
324f74a6c4f93ac617ebbd3b593a6080f88fe1d1
|
2a67dc681af4c4b9ef7a8e18c2ff75377dc5b44f
|
/aws.ec2.VpcEndpoint.basic-w-tags-python/__main__.py
|
ec260cab0c46c84539b68d1d8344454640761a1a
|
[] |
no_license
|
ehubbard/templates-aws
|
e323b693a18234defe6bd56ffcc64095dc58e3a1
|
2ae2e7a5d05490078017fed6d132dcdde1f21c63
|
refs/heads/master
| 2022-11-17T13:53:14.531872 | 2020-07-10T21:56:27 | 2020-07-10T21:56:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 202 |
py
|
import pulumi
import pulumi_aws as aws
s3 = aws.ec2.VpcEndpoint("s3",
service_name="com.amazonaws.us-west-2.s3",
tags={
"Environment": "test",
},
vpc_id=aws_vpc["main"]["id"])
|
[
"[email protected]"
] | |
b818a9154b0d83fa3304579263317d182517db0d
|
d570d68fff337f2b14b61afe9d8cba6b228b3a6a
|
/tests/pep492/test_async_await.py
|
c9f0ceb66a15c1107515479aa795eb93daeb8e2e
|
[
"BSD-2-Clause"
] |
permissive
|
meren/aiopg
|
bce6c50229061818e3d1a318c748479d1896881c
|
798e41babe50394a0f7704d99c31d9d011fae16f
|
refs/heads/master
| 2020-12-07T00:33:04.878681 | 2015-12-21T12:07:43 | 2015-12-21T12:07:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,452 |
py
|
import unittest
import asyncio
import aiopg
class TestAsyncWith(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
self.database = 'aiopg'
self.user = 'aiopg'
self.host = '127.0.0.1'
self.password = 'aiopg'
def tearDown(self):
self.loop.close()
self.loop = None
async def connect(self, no_loop=False, **kwargs):
loop = None if no_loop else self.loop
conn = await aiopg.connect(database=self.database,
user=self.user,
password=self.password,
host=self.host,
loop=loop,
**kwargs)
self.addCleanup(conn.close)
return conn
def test_cursor_await(self):
async def go():
conn = await self.connect()
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
cursor.close()
self.loop.run_until_complete(go())
def test_connect_context_manager(self):
async def go():
kw = dict(database='aiopg', user='aiopg', password='passwd',
host='127.0.0.1', loop=self.loop)
async with aiopg.connect(**kw) as conn:
cursor = await conn.cursor()
await cursor.execute('SELECT 42')
resp = await cursor.fetchone()
assert resp == (42, )
cursor.close()
assert conn.closed
self.loop.run_until_complete(go())
def test_connection_context_manager(self):
async def go():
conn = await self.connect()
assert not conn.closed
async with conn:
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
cursor.close()
assert conn.closed
self.loop.run_until_complete(go())
def test_cursor_create_with_context_manager(self):
async def go():
conn = await self.connect()
async with conn.cursor() as cursor:
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
assert not cursor.closed
assert cursor.closed
self.loop.run_until_complete(go())
def test_cursor_with_context_manager(self):
async def go():
conn = await self.connect()
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
assert not cursor.closed
async with cursor:
resp = await cursor.fetchone()
assert resp == (42, )
assert cursor.closed
self.loop.run_until_complete(go())
def test_cursor_lightweight(self):
async def go():
conn = await self.connect()
cursor = await conn.cursor()
await cursor.execute('SELECT 42;')
assert not cursor.closed
async with cursor:
pass
assert cursor.closed
self.loop.run_until_complete(go())
def test_pool_context_manager(self):
async def go():
pool = await aiopg.create_pool(host=self.host, user=self.user,
database=self.database,
password=self.password,
loop=self.loop)
async with pool:
conn = await pool.acquire()
async with conn.cursor() as cursor:
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
pool.release(conn)
assert cursor.closed
assert pool.closed
self.loop.run_until_complete(go())
def test_create_pool_context_manager(self):
async def go():
async with aiopg.create_pool(host=self.host, user=self.user,
database=self.database,
password=self.password,
loop=self.loop) as pool:
async with pool.get() as conn:
async with conn.cursor() as cursor:
await cursor.execute('SELECT 42;')
resp = await cursor.fetchone()
assert resp == (42, )
assert cursor.closed
assert conn.closed
assert pool.closed
self.loop.run_until_complete(go())
def test_cursor_aiter(self):
async def go():
result = []
conn = await self.connect()
assert not conn.closed
async with conn:
cursor = await conn.cursor()
await cursor.execute('SELECT generate_series(1, 5);')
async for v in cursor:
result.append(v)
assert result == [(1,), (2, ), (3, ), (4, ), (5, )]
cursor.close()
assert conn.closed
self.loop.run_until_complete(go())
|
[
"[email protected]"
] | |
a4ad1faf3f8c1120766cdb1d029093ef98d85b5d
|
ddb38cabda8f8d1ad7c8a8b4af3698c4022ee9d6
|
/examples/link.py
|
9773c330149be4beda4f402ba295e9f813a6e7f8
|
[
"MIT"
] |
permissive
|
caofanCPU/rich
|
6bc282ca3310cee3aa35f87a507fe3d79dda6af7
|
cda20808ab645a239ac3538013bd6ba2d324bb45
|
refs/heads/master
| 2023-01-05T22:51:28.496542 | 2020-10-30T10:58:00 | 2020-10-30T10:58:00 | 289,209,964 | 2 | 0 |
MIT
| 2020-10-30T10:58:01 | 2020-08-21T07:45:54 | null |
UTF-8
|
Python
| false | false | 192 |
py
|
from rich import print
print("If your terminal supports links, the following text should be clickable:")
print("[link=https://www.willmcgugan.com][i]Visit [red]my[/red][/i] [yellow]Blog[/]")
|
[
"[email protected]"
] | |
a05ac4c0d8cc17a6a095d7611a3e500f21cf8e59
|
0b204928356d6825124787877b487b27bce19790
|
/exercises/Chapter 07/07-17.py
|
01e858f27ccd0bd3920b2a7326d164f1fb30dd65
|
[] |
no_license
|
shuxinzhang/nltk-learning
|
bff095f585bc42e697ca6cf523d71aec4a8aeeeb
|
0428ec5d73b325c91f1d82fb26324482ca69aae4
|
refs/heads/master
| 2021-01-01T04:43:26.536545 | 2017-07-26T13:37:06 | 2017-07-26T13:37:06 | 97,234,566 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 388 |
py
|
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
import nltk
'''
★
An n-gram chunker can use information other than the current
part-of-speech tag and the n-1 previous chunk tags.
Investigate other models of the context, such as
the n-1 previous part-of-speech tags, or some combination of
previous chunk tags along with previous and following part-of-speech tags.
'''
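# Illustrative sketch of one possible answer (not part of the original exercise file):
# a classifier-based NP chunker whose context model uses the previous part-of-speech
# tag in addition to the current POS tag and the previous chunk tag. It assumes the
# conll2000 corpus is available (nltk.download('conll2000')); all names below are
# illustrative, not a reference solution.
from nltk.corpus import conll2000
def chunk_features(sentence, i, history):
    # sentence: list of (word, pos) pairs; history: chunk tags assigned so far
    word, pos = sentence[i]
    prevpos = sentence[i - 1][1] if i > 0 else '<START>'
    prevchunk = history[-1] if history else '<START>'
    return {'pos': pos, 'prevpos': prevpos, 'prevchunk': prevchunk}
class ContextChunkTagger(nltk.TaggerI):
    def __init__(self, train_sents):
        train_set = []
        for tagged_sent in train_sents:
            untagged = nltk.tag.untag(tagged_sent)
            history = []
            for i, (wordpos, chunktag) in enumerate(tagged_sent):
                train_set.append((chunk_features(untagged, i, history), chunktag))
                history.append(chunktag)
        self.classifier = nltk.NaiveBayesClassifier.train(train_set)
    def tag(self, sentence):
        history = []
        for i in range(len(sentence)):
            history.append(self.classifier.classify(chunk_features(sentence, i, history)))
        return list(zip(sentence, history))
class ContextChunker(nltk.ChunkParserI):
    def __init__(self, train_sents):
        tagged = [[((w, t), c) for (w, t, c) in nltk.chunk.tree2conlltags(s)]
                  for s in train_sents]
        self.tagger = ContextChunkTagger(tagged)
    def parse(self, sentence):
        conlltags = [(w, t, c) for ((w, t), c) in self.tagger.tag(sentence)]
        return nltk.chunk.conlltags2tree(conlltags)
if __name__ == '__main__':
    train_sents = conll2000.chunked_sents('train.txt', chunk_types=['NP'])
    test_sents = conll2000.chunked_sents('test.txt', chunk_types=['NP'])
    print(ContextChunker(train_sents).evaluate(test_sents))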
|
[
"[email protected]"
] | |
00ef7c0f0e8373c683ffa1edcfa145d5849fffaf
|
c6760258b3ad3dd912f0842b8ae03cbea188a8c4
|
/fsleyes/actions/addmaskdataseries.py
|
e298c1805fd70b7c1f2afcc59b0cd5e3f7029faa
|
[
"BSD-3-Clause",
"CC-BY-3.0",
"Apache-2.0"
] |
permissive
|
sanjayankur31/fsleyes
|
aa822f627cde38ec766180fb591c9af7d18d2126
|
46ccb4fe2b2346eb57576247f49714032b61307a
|
refs/heads/master
| 2020-04-09T08:41:18.380424 | 2018-12-03T11:44:51 | 2018-12-03T11:44:51 | 160,204,259 | 1 | 0 | null | 2018-12-03T14:31:31 | 2018-12-03T14:31:31 | null |
UTF-8
|
Python
| false | false | 9,453 |
py
|
#!/usr/bin/env python
#
# addmaskdataseries.py - The AddMaskDataSeriesAction class.
#
# Author: Paul McCarthy <[email protected]>
#
"""This module provides the :class:`AddMaskDataSeriesAction` class, an action
used by the :class:`.TimeSeriesPanel`.
"""
import wx
import numpy as np
import fsl.data.image as fslimage
import fsleyes.strings as strings
import fsleyes.plotting.dataseries as dataseries
from . import base
class AddMaskDataSeriesAction(base.Action):
"""The ``AddMaskDataSeriesAction`` class is used by the
:class:`.TimeSeriesPanel`.
It prompts the user to select a mask image for the currently selected
overlay (assumed to be a 4D time series :class:`.Image`), then extracts
the mean time series for the non-zero voxels within the mask, and adds
them as a :class:`.DataSeries` to the ``TimeSeriesPanel``.
"""
def __init__(self, overlayList, displayCtx, plotPanel):
"""Create an ``AddMaskDataSeriesAction``.
:arg overlayList: The :class:`.OverlayList`.
:arg displayCtx: The :class:`.DisplayContext`.
:arg plotPanel: The :class:`.TimeSeriesPanel`.
"""
base.Action.__init__(self, self.__addMaskDataSeries)
self.__overlayList = overlayList
self.__displayCtx = displayCtx
self.__plotPanel = plotPanel
self.__name = '{}_{}'.format(type(self).__name__, id(self))
self.__maskOptions = []
overlayList.addListener('overlays',
self.__name,
self.__overlayListChanged)
displayCtx .addListener('selectedOverlay',
self.__name,
self.__overlayListChanged)
self.__overlayListChanged()
def destroy(self):
"""Must be called when this ``AddMaskDataSeriesAction`` is no
longer in use.
"""
self.__overlayList.removeListener('overlays', self.__name)
self.__displayCtx .removeListener('selectedOverlay', self.__name)
self.__overlayList = None
self.__displayCtx = None
self.__plotPanel = None
self.__maskOptions = None
base.Action.destroy(self)
def __overlayListChanged(self, *a):
"""Called when the :class:`.OverlayList` changes. Updates the
:attr:`.Action.enabled` flag based on the currently selected
overlay, and the contents of the overlay list.
"""
overlay = self.__displayCtx.getSelectedOverlay()
if (len(self.__overlayList) == 0 or
(not isinstance(overlay, fslimage.Image))):
self.enabled = False
return
self.__maskOptions = [o for o in self.__overlayList if
isinstance(o, fslimage.Image) and
o is not overlay and
o.sameSpace(overlay)]
self.enabled = (overlay.ndim > 3 and len(self.__maskOptions) > 0)
def __addMaskDataSeries(self):
"""Run the ``AddMaskDataSeriesAction``. Prompt the user to select
a mask, using a :class:`MaskDialog`, then calculates the mean time
series in that mask, then adds that time series to the
:class:`.TimeSeriesPanel` that owns this action instance.
"""
overlay = self.__displayCtx.getSelectedOverlay()
opts = self.__displayCtx.getOpts(overlay)
options = self.__maskOptions
frame = wx.GetApp().GetTopWindow()
msg = strings.messages[self, 'selectMask'].format(overlay.name)
cbmsg = strings.messages[self, 'weighted']
title = strings.titles[ self, 'selectMask'].format(overlay.name)
dlg = MaskDialog(
frame,
[o.name for o in options],
title=title,
message=msg,
checkboxMessage=cbmsg)
if dlg.ShowModal() != wx.ID_OK:
return
maskimg = options[dlg.GetChoice()]
weight = dlg.GetCheckBox()
ds = dataseries.DataSeries(overlay,
self.__overlayList,
self.__displayCtx,
self.__plotPanel)
data = overlay.nibImage.get_data()[opts.index(atVolume=False)]
mask = maskimg.nibImage.get_data()
maskmask = mask > 0
ydata = data[maskmask]
# Weighted mean
if weight:
maskvals = mask[maskmask]
ydata = (maskvals * ydata.T).T
ydata = ydata.mean(axis=0)
xdata = np.arange(len(ydata))
ds.colour = self.__plotPanel.getOverlayPlotColour(overlay)
ds.alpha = 1
ds.lineWidth = 1
ds.lineStyle = '-'
ds.label = '{} [mask: {}]'.format(overlay.name, maskimg.name)
# We have to run the data through
# prepareDataSeries to e.g. scale
# the x axis by pixdims, and apply
# other plot settings
ds.setData(xdata, ydata)
ds.setData(*self.__plotPanel.prepareDataSeries(ds))
self.__plotPanel.dataSeries.append(ds)
class MaskDialog(wx.Dialog):
"""A dialog which displays some options to the user:
- A ``Choice`` widget containing a list of mask images
- A checkbox allowing the user to select whether to calculate
the weighted mean time series, weighted by the mask values,
or calculate the unweighted mean.
    The selections are available via the :meth:`GetChoice` and
    :meth:`GetCheckBox` methods
"""
def __init__(self,
parent,
choices,
title=None,
message=None,
checkbox=True,
checkboxMessage=None):
"""Create a ``ChoiceDialog``.
:arg parent: ``wx`` parent object.
:arg choices: List of strings, the choices to present to the
user.
:arg title: Dialog title
:arg message: Message to show above choice widget.
:arg checkbox: Show a checkbox
:arg checkboxMessage: Message to show alongside checkbox widget.
"""
if title is None: title = ''
if message is None: message = ''
if checkboxMessage is None: checkboxMessage = ''
wx.Dialog.__init__(self,
parent,
title=title,
style=wx.DEFAULT_DIALOG_STYLE)
self.__message = wx.StaticText(self, label=message)
self.__choice = wx.Choice(self, choices=choices)
if checkbox: self.__checkbox = wx.CheckBox(self, label=checkboxMessage)
else: self.__checkbox = None
self.__okButton = wx.Button(self, label='Ok', id=wx.ID_OK)
self.__cancelButton = wx.Button(self, label='Cancel', id=wx.ID_CANCEL)
self.__okButton .Bind(wx.EVT_BUTTON, self.__onOkButton)
self.__cancelButton.Bind(wx.EVT_BUTTON, self.__onCancelButton)
self.__okButton.SetDefault()
self.__mainSizer = wx.BoxSizer(wx.VERTICAL)
self.__buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
self.__buttonSizer.Add((1, 1), flag=wx.EXPAND, proportion=1)
self.__buttonSizer.Add(self.__okButton)
self.__buttonSizer.Add((5, 1), flag=wx.EXPAND)
self.__buttonSizer.Add(self.__cancelButton)
self.__mainSizer.Add(self.__message,
flag=wx.EXPAND | wx.ALL,
proportion=1,
border=20)
self.__mainSizer.Add(self.__choice,
flag=wx.EXPAND | wx.LEFT | wx.RIGHT,
border=20)
if checkbox:
self.__mainSizer.Add(self.__checkbox,
flag=wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP,
border=20)
self.__mainSizer.Add(self.__buttonSizer,
flag=wx.EXPAND | wx.ALL,
border=20)
self.SetSizer(self.__mainSizer)
self.Layout()
self.Fit()
self.CentreOnParent()
@property
def okButton(self):
"""Returns the OK button. """
return self.__okButton
@property
def cancelButton(self):
"""Returns the cancel button. """
return self.__cancelButton
@property
def checkbox(self):
"""Returns the checkbox. """
return self.__checkbox
@property
def choice(self):
"""Returns the choice widget. """
return self.__choice
def GetChoice(self):
"""Returns the index of the currently selected choice."""
return self.__choice.GetSelection()
def GetCheckBox(self):
"""Returns the index of the currently selected choice."""
if self.__checkbox is None:
raise RuntimeError('This dialog does not have a checkbox')
return self.__checkbox.GetValue()
def __onOkButton(self, ev):
"""Called when the ok button is pushed. """
self.EndModal(wx.ID_OK)
def __onCancelButton(self, ev):
"""Called when the cancel button is pushed. """
self.EndModal(wx.ID_CANCEL)
|
[
"[email protected]"
] | |
38cedd932c2f25213428fe2e550d32592b7fec2f
|
efbe970cb374d4416c2c500a495994397ea18dd5
|
/plugins/invites.py
|
bf0da04c4e1913d561661270ad7e34769e2cc491
|
[
"MIT"
] |
permissive
|
void-being/bepis-bot
|
f7d9fbc7663bb8a28c70e312fa4fb20c53c406c7
|
491b8de94b94384df6b26fa6a1325ee578020b7e
|
refs/heads/master
| 2020-07-11T17:28:10.080879 | 2018-11-15T23:44:06 | 2018-11-15T23:44:06 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,749 |
py
|
from logging import getLogger
from utils.common import GENERAL_CHANNEL
from utils.db import InviteDatabase, Database
from utils.deco import ensure_profile
from disco.api.http import APIException
from disco.bot import Plugin
class InvitePlug(Plugin):
def load(self, config):
self.invite_db = InviteDatabase()
self.db = Database("InvitePlug")
self.logger = getLogger("InvitePlug")
super().load(config)
self.logger.info("Finished loading invite plugin")
@Plugin.listen("GuildMemberAdd")
def on_member(self, event):
if self.invite_db.already_joined(event):
self.logger.info("User {0} has rejoined the server".format(event.user.id))
else:
for invite in self.invite_db:
try:
invite_obj = event.client.api.invites_get(invite['invite_code'])
print(invite_obj.uses, "/", invite_obj.max_uses)
except APIException:
self.logger.info("Invite revoked! Rewarding accordingly")
self.db.create_user(event.user)
invited = self.db.find_user(event.user.id)
inviter = self.db.find_user(invite['user_id'])
invited.bepis += 20
inviter.bepis += 30
event.client.api.channels_messages_create(
GENERAL_CHANNEL,
"Thanks for inviting <@{0}>, <@{1}>. You've earned 30 bepis and"
" <@{1}> earned 20 for using the referral link".format(invited.user_id, inviter.user_id)
)
self.invite_db.remove_invite(invite['invite_code'])
self.logger.info("Removed invite and rewarded users")
break
else:
self.db.create_user(event.user)
self.logger.info("Created account for User {0}".format(event.user.id))
@Plugin.command("invite")
@ensure_profile
def create_invite(self, event, user):
invite = self.invite_db.invites.find_one({"user_id": user.user_id})
if invite:
invite_code = invite['invite_code']
else:
invite = event.msg.channel.create_invite(
max_age=0,
max_uses=1,
unique=True
)
invite_code = invite.code
self.invite_db.register_invite(invite_code, user.user_id)
event.msg.reply("There! Here's your referral link. Whenever a person joins with this link, you'll get 30 bepis"
"and they'll get 20. Make sure to get a new link after inviting someone! https://discord.gg/"
+ invite_code)
|
[
"[email protected]"
] | |
5974adfcd647a183ebd24dc44138ee6aea00339a
|
284d146079ff247ce46a06f08e3651551ea4a6bd
|
/n_grams.py
|
934161b64db190d9a55cd08baa067f16ba2cf3e4
|
[] |
no_license
|
massyah/GRN-analysis
|
e872e36d7c07ad9674391c7ca017d3c55342ab7d
|
1f985555198693cc9be21d70d1c93050f0ad2f9a
|
refs/heads/master
| 2021-01-10T21:11:28.267953 | 2017-01-10T20:40:03 | 2017-01-10T20:40:03 | 1,071,136 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,664 |
py
|
#!/usr/bin/env python
# encoding: utf-8
import os
import copy
# project="Th17 differentiation"
# termFile=project+"/articles_th17.txt"
# pubmed_files=[f for f in os.listdir("./"+project) if f.startswith("entrez ")]
stop_words=open("stop_words.txt").readlines()
stop_words=[x.strip() for x in stop_words]
occ_number={}
exemple_sentences={}
n_grams_occurences={}
def build_n_grams(n,table):
n_grams=[]
for i in range(0,len(table)-n+1):
n_grams.append(tuple(table[i:i+n]))
return n_grams
# toRemove=re.compile("|".join(["of","the","in","for","with","by","this","well","as"]))
def tokenize_sentence(s):
s=s.replace("(","")
s=s.replace(")","")
s=s.replace(",","")
splitted=[]
recognized=allTermsRe.findall(s)
for parts in allTermsRe.split(s):
if (parts in recognized):
parts=parts.lower()
if "_"+parts in allTerms:
parts="_"+parts
splitted.append(allTerms[parts].name)
else:
for w in parts.split():
if w.lower() in stop_words:
continue
w=w.lower()
w=w.strip(",.")
splitted.append(w.lower())
return splitted
def compute_occurences():
global occ_number,n_grams_occurences
occ_number={}
n_grams_occurences={}
for s in allSentences:
#we get rid of ()
s=s.string
splitted=[]
tokens=tokenize_sentence(s)
for tok in tokens:
if tok not in occ_number:
occ_number[tok]=0
occ_number[tok]+=1
if tok not in exemple_sentences:
exemple_sentences[tok]=[]
exemple_sentences[tok].append(s)
for i in range(2,6):
grams=build_n_grams(i,tokens)
for g in grams:
g=tuple(g)
if g not in n_grams_occurences:
n_grams_occurences[g]=0
if g not in exemple_sentences:
exemple_sentences[g]=[]
n_grams_occurences[g]+=1
exemple_sentences[g].append(s)
# print occ_number
#we first build the frequencies of terms related to all publications
allPublications={}
allSentences=[]
allTerms={}
allTermsRe=None
allPredicates=[]
uid=0
sentencesUid=0
evidencesUid=0
pubmedIdTopub={}
nxG=None
project="Th17 differentiation"
termFile="Th17 differentiation human/th17_human_terms.txt"
pubmed_files=[f for f in os.listdir("./"+project) if f.startswith("entrez ")]
parse_file(termFile)
compute_occurences()
occ_number_general=copy.copy(occ_number)
n_grams_occurences_general=copy.copy(n_grams_occurences)
exemple_sentences_general=copy.copy(exemple_sentences)
n_sents_general=len(allSentences)
print "General term frequencies computed"
allPublications={}
allSentences=[]
allPredicates=[]
pubmedIdTopub={}
nxG=None
project="Th17 differentiation human"
termFile=project+"/th17_human_terms.txt"
pubmed_files=[f for f in os.listdir("./"+project) if f.startswith("entrez ")]
pubmed_files.append("pubmed_result.txt")
parse_file(termFile)
occ_number={}
exemple_sentences={}
n_grams_occurences={}
compute_occurences()
n_sents=len(allSentences)
print "Human specific frequencies computed"
diff_freq={}
for k,v in occ_number.items():
if k not in diff_freq:
diff_freq[k]=0
if k not in occ_number_general:
diff_freq[k]=1.0*v/n_sents
else:
diff_freq[k]=1.0*occ_number[k]/n_sents-1.0*occ_number_general[k]/n_sents_general
#sort the dicts by freqs
occ_number=occ_number.items()
occ_number.sort(key=lambda x:x[1],reverse=True)
occ_number_general=occ_number_general.items()
occ_number_general.sort(key=lambda x:x[1],reverse=True)
n_grams_occurences=[x for x in n_grams_occurences.items() if x[1]>3]
n_grams_occurences.sort(key=lambda x:x[1],reverse=True)
n_grams_occurences_general=[x for x in n_grams_occurences_general.items() if x[1]>3]
n_grams_occurences_general.sort(key=lambda x:x[1],reverse=True)
diff_freq=diff_freq.items()
diff_freq.sort(key=lambda x:x[1],reverse=True)
|
[
"[email protected]"
] | |
3dbda848af9311c79540f19a16701a5fa967df65
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/KoubeiItemExtitemQueryResponse.py
|
dcc088d4e0ab28be40c13158601fdb346f4422aa
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 855 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.ExtItem import ExtItem
class KoubeiItemExtitemQueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiItemExtitemQueryResponse, self).__init__()
self._extitem = None
@property
def extitem(self):
return self._extitem
@extitem.setter
def extitem(self, value):
if isinstance(value, ExtItem):
self._extitem = value
else:
self._extitem = ExtItem.from_alipay_dict(value)
def parse_response_content(self, response_content):
response = super(KoubeiItemExtitemQueryResponse, self).parse_response_content(response_content)
if 'extitem' in response:
self.extitem = response['extitem']
|
[
"[email protected]"
] | |
1f4f4feed8d738e787ed86e3d7022992d076cb0f
|
648f427a9d9a73720f76df972be2983354e13b61
|
/test/functional/p2p-leaktests.py
|
74f52f84f1e26ac02479c2fe64a636ad5c488e14
|
[
"MIT"
] |
permissive
|
aixinwang/Gfc
|
e659850e398dc9ab8b6a697b9262462a8e316e8a
|
4a7fdac234f5f51055e471e77aaff62cfa4c6eab
|
refs/heads/master
| 2021-04-03T08:33:03.198293 | 2018-03-14T04:32:38 | 2018-03-14T04:32:38 | 125,152,463 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,654 |
py
|
#!/usr/bin/env python3
# Copyright (c) 2017 The GFC coin bt developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test message sending before handshake completion.
A node should never send anything other than VERSION/VERACK/REJECT until it's
received a VERACK.
This test connects to a node and sends it a few messages, trying to entice it
into sending us something it shouldn't.
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
banscore = 10
class CLazyNode(NodeConnCB):
def __init__(self):
super().__init__()
self.unexpected_msg = False
self.ever_connected = False
def bad_message(self, message):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
def on_open(self, conn):
self.connected = True
self.ever_connected = True
def on_version(self, conn, message): self.bad_message(message)
def on_verack(self, conn, message): self.bad_message(message)
def on_reject(self, conn, message): self.bad_message(message)
def on_inv(self, conn, message): self.bad_message(message)
def on_addr(self, conn, message): self.bad_message(message)
def on_alert(self, conn, message): self.bad_message(message)
def on_getdata(self, conn, message): self.bad_message(message)
def on_getblocks(self, conn, message): self.bad_message(message)
def on_tx(self, conn, message): self.bad_message(message)
def on_block(self, conn, message): self.bad_message(message)
def on_getaddr(self, conn, message): self.bad_message(message)
def on_headers(self, conn, message): self.bad_message(message)
def on_getheaders(self, conn, message): self.bad_message(message)
def on_ping(self, conn, message): self.bad_message(message)
    def on_mempool(self, conn, message): self.bad_message(message)
def on_pong(self, conn, message): self.bad_message(message)
def on_feefilter(self, conn, message): self.bad_message(message)
def on_sendheaders(self, conn, message): self.bad_message(message)
def on_sendcmpct(self, conn, message): self.bad_message(message)
def on_cmpctblock(self, conn, message): self.bad_message(message)
def on_getblocktxn(self, conn, message): self.bad_message(message)
def on_blocktxn(self, conn, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
def on_open(self, conn):
super().on_open(conn)
for i in range(banscore):
self.send_message(msg_verack())
def on_reject(self, conn, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
self.version_received = False
super().__init__()
def on_reject(self, conn, message): pass
def on_verack(self, conn, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
def on_version(self, conn, message):
self.version_received = True
conn.send_message(msg_ping())
conn.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.extra_args = [['-banscore='+str(banscore)]]
def run_test(self):
no_version_bannode = CNodeNoVersionBan()
no_version_idlenode = CNodeNoVersionIdle()
no_verack_idlenode = CNodeNoVerackIdle()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_bannode, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_version_idlenode, send_version=False))
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], no_verack_idlenode))
no_version_bannode.add_connection(connections[0])
no_version_idlenode.add_connection(connections[1])
no_verack_idlenode.add_connection(connections[2])
NetworkThread().start() # Start up network handling in another thread
assert wait_until(lambda: no_version_bannode.ever_connected, timeout=10)
assert wait_until(lambda: no_version_idlenode.ever_connected, timeout=10)
assert wait_until(lambda: no_verack_idlenode.version_received, timeout=10)
# Mine a block and make sure that it's not sent to the connected nodes
self.nodes[0].generate(1)
#Give the node enough time to possibly leak out a message
time.sleep(5)
#This node should have been banned
assert not no_version_bannode.connected
[conn.disconnect_node() for conn in connections]
# Make sure no unexpected messages came in
assert(no_version_bannode.unexpected_msg == False)
assert(no_version_idlenode.unexpected_msg == False)
assert(no_verack_idlenode.unexpected_msg == False)
if __name__ == '__main__':
P2PLeakTest().main()
|
[
"[email protected]"
] | |
4be69bac70655e225aa2eeed666f85b2c6cf5500
|
680bd46e8eae20e78a425f766432711a47235374
|
/models/big_number_item.py
|
1844314793bf8a7d60cf2454e718fdbf79835d64
|
[
"Apache-2.0"
] |
permissive
|
ILMostro/lm-sdk-python
|
9f45217d64c0fc49caf2f4b279a124c2efe3d24d
|
40da5812ab4d50dd1c6c3c68f7ea13c4d8f4fb49
|
refs/heads/master
| 2022-02-01T16:51:12.810483 | 2019-07-16T17:54:11 | 2019-07-16T17:54:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,277 |
py
|
# coding: utf-8
"""
LogicMonitor REST API
LogicMonitor is a SaaS-based performance monitoring platform that provides full visibility into complex, hybrid infrastructures, offering granular performance monitoring and actionable data and insights. logicmonitor_sdk enables you to manage your LogicMonitor account programmatically. # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from logicmonitor_sdk.models.color_threshold import ColorThreshold # noqa: F401,E501
class BigNumberItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'bottom_label': 'str',
'color_thresholds': 'list[ColorThreshold]',
'data_point_name': 'str',
'position': 'int',
'right_label': 'str',
'rounding': 'int',
'use_comma_separators': 'bool'
}
attribute_map = {
'bottom_label': 'bottomLabel',
'color_thresholds': 'colorThresholds',
'data_point_name': 'dataPointName',
'position': 'position',
'right_label': 'rightLabel',
'rounding': 'rounding',
'use_comma_separators': 'useCommaSeparators'
}
def __init__(self, bottom_label=None, color_thresholds=None, data_point_name=None, position=None, right_label=None, rounding=None, use_comma_separators=None): # noqa: E501
"""BigNumberItem - a model defined in Swagger""" # noqa: E501
self._bottom_label = None
self._color_thresholds = None
self._data_point_name = None
self._position = None
self._right_label = None
self._rounding = None
self._use_comma_separators = None
self.discriminator = None
if bottom_label is not None:
self.bottom_label = bottom_label
if color_thresholds is not None:
self.color_thresholds = color_thresholds
self.data_point_name = data_point_name
if position is not None:
self.position = position
if right_label is not None:
self.right_label = right_label
if rounding is not None:
self.rounding = rounding
self.use_comma_separators = use_comma_separators
@property
def bottom_label(self):
"""Gets the bottom_label of this BigNumberItem. # noqa: E501
:return: The bottom_label of this BigNumberItem. # noqa: E501
:rtype: str
"""
return self._bottom_label
@bottom_label.setter
def bottom_label(self, bottom_label):
"""Sets the bottom_label of this BigNumberItem.
:param bottom_label: The bottom_label of this BigNumberItem. # noqa: E501
:type: str
"""
self._bottom_label = bottom_label
@property
def color_thresholds(self):
"""Gets the color_thresholds of this BigNumberItem. # noqa: E501
:return: The color_thresholds of this BigNumberItem. # noqa: E501
:rtype: list[ColorThreshold]
"""
return self._color_thresholds
@color_thresholds.setter
def color_thresholds(self, color_thresholds):
"""Sets the color_thresholds of this BigNumberItem.
:param color_thresholds: The color_thresholds of this BigNumberItem. # noqa: E501
:type: list[ColorThreshold]
"""
self._color_thresholds = color_thresholds
@property
def data_point_name(self):
"""Gets the data_point_name of this BigNumberItem. # noqa: E501
:return: The data_point_name of this BigNumberItem. # noqa: E501
:rtype: str
"""
return self._data_point_name
@data_point_name.setter
def data_point_name(self, data_point_name):
"""Sets the data_point_name of this BigNumberItem.
:param data_point_name: The data_point_name of this BigNumberItem. # noqa: E501
:type: str
"""
if data_point_name is None:
raise ValueError("Invalid value for `data_point_name`, must not be `None`") # noqa: E501
self._data_point_name = data_point_name
@property
def position(self):
"""Gets the position of this BigNumberItem. # noqa: E501
:return: The position of this BigNumberItem. # noqa: E501
:rtype: int
"""
return self._position
@position.setter
def position(self, position):
"""Sets the position of this BigNumberItem.
:param position: The position of this BigNumberItem. # noqa: E501
:type: int
"""
self._position = position
@property
def right_label(self):
"""Gets the right_label of this BigNumberItem. # noqa: E501
:return: The right_label of this BigNumberItem. # noqa: E501
:rtype: str
"""
return self._right_label
@right_label.setter
def right_label(self, right_label):
"""Sets the right_label of this BigNumberItem.
:param right_label: The right_label of this BigNumberItem. # noqa: E501
:type: str
"""
self._right_label = right_label
@property
def rounding(self):
"""Gets the rounding of this BigNumberItem. # noqa: E501
:return: The rounding of this BigNumberItem. # noqa: E501
:rtype: int
"""
return self._rounding
@rounding.setter
def rounding(self, rounding):
"""Sets the rounding of this BigNumberItem.
:param rounding: The rounding of this BigNumberItem. # noqa: E501
:type: int
"""
self._rounding = rounding
@property
def use_comma_separators(self):
"""Gets the use_comma_separators of this BigNumberItem. # noqa: E501
:return: The use_comma_separators of this BigNumberItem. # noqa: E501
:rtype: bool
"""
return self._use_comma_separators
@use_comma_separators.setter
def use_comma_separators(self, use_comma_separators):
"""Sets the use_comma_separators of this BigNumberItem.
:param use_comma_separators: The use_comma_separators of this BigNumberItem. # noqa: E501
:type: bool
"""
if use_comma_separators is None:
raise ValueError("Invalid value for `use_comma_separators`, must not be `None`") # noqa: E501
self._use_comma_separators = use_comma_separators
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(BigNumberItem, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, BigNumberItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"[email protected]"
] | |
3a056a7330c5f4fa3afb79da1bd70e37f1fd2f32
|
9e4eab70447a892becf36cec0e656318d27a26f3
|
/core/middlewares/common.py
|
f10909e45820993e0991bb19f280a120a1d8637d
|
[
"MIT"
] |
permissive
|
aldwyn-acn/effigia
|
7afa8f52641e2cdcbf69a66a593a4a58191c9b9b
|
cd105ee3a938785791cff474fd2959352a41a6a6
|
refs/heads/master
| 2020-03-31T01:04:49.710702 | 2019-07-30T23:12:32 | 2019-07-30T23:12:32 | 151,765,928 | 0 | 0 | null | 2018-10-05T19:03:27 | 2018-10-05T19:03:27 | null |
UTF-8
|
Python
| false | false | 1,629 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from actstream import action
from django.utils.deprecation import MiddlewareMixin
from apps.galleries.views import GalleryItemView
from apps.galleries.models import Gallery
from apps.portfolios.views import PortfolioItemView
from apps.portfolios.models import Portfolio
from apps.groups.views import GroupItemView
from apps.groups.models import Group
MODELS_FOR_SAVING_VISITS = {
GalleryItemView: Gallery,
GroupItemView: Group,
PortfolioItemView: Portfolio,
}
class MiddlewareObjectMixin(object):
def get_object(self, klass, **kwargs):
if kwargs.get('slug'):
return klass.objects.get(slug=kwargs['slug'])
def is_item_view(self, view_func, view_kwargs):
return (hasattr(view_func, 'view_class') and
view_func.view_class in MODELS_FOR_SAVING_VISITS and
view_kwargs.get('slug'))
class EffigiaVisitMiddleware(MiddlewareObjectMixin, MiddlewareMixin):
def process_view(self, request, view_func, view_args, view_kwargs):
""" Save an action for the visited objects of the current user """
if (self.is_item_view(view_func, view_kwargs)):
klass = MODELS_FOR_SAVING_VISITS[view_func.view_class]
obj = self.get_object(klass, **view_kwargs)
if request.user.is_authenticated():
action.send(request.user, verb='visited a %s' % obj._meta.verbose_name, target=obj)
else:
obj.anonymous_visits_count += 1
obj.save()
return view_func(request, *view_args, **view_kwargs)
|
[
"[email protected]"
] | |
64286712f027df5f47a92489c32b3602446d79b1
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/40/usersdata/102/24477/submittedfiles/funcoes.py
|
85696e1d61292798d9f6040a5edaa4363fc470ec
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,141 |
py
|
from __future__ import division
import numpy as np
def calcula_valor_absoluto(x):
if x<0:
x=x*(-1)
return x
else:
return x
def calcula_pi(m):
soma=0
i=1
j=2
while i<=m:
if 1<=m and m<=2000:
if i%2==0:
soma=soma-(4/(j*(j+1)*(j+2)))
else:
soma=soma+(4/(j*(j+1)*(j+2)))
i=i+1
j=j+2
pi=3+soma
return pi
def fatorial(a):
fatorial=1
for i in range(1,a+1,1):
fatorial=fatorial*i
return fatorial
def calcula_cos_seno(z,epsilon):
    i=1
    j=2
    soma=0
    termo=((z**j)/fatorial(j))
    while termo>epsilon:
        if i%2!=0:
            soma=soma-termo
        else:
            soma=soma+termo
        i=i+1
        j=j+2
        termo=((z**j)/fatorial(j))
    cosseno=1+soma
    return cosseno
def calcula_razao_aurea(m,epsilon):
    razao_aurea=2*(calcula_cos_seno(calcula_pi(m)/5.0,epsilon))
    return razao_aurea
|
[
"[email protected]"
] |