blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
445409926fa5283911011a77f745099c9cf58d53 | 99a310f6bb6c7a6c728f1b3ae78054487372042d | /aoc2017/day9.py | 915cef5559e604d0f1001897d7c7e96a442ff7ed | [] | no_license | jepebe/aoc2018 | 46ce6b46479a0faf2c2970413af14a071dcfdb79 | 4bf91b99bec4b59529533ef70f24bf6496bada99 | refs/heads/master | 2023-01-11T16:44:42.125394 | 2023-01-06T06:27:14 | 2023-01-06T06:27:14 | 159,912,721 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | def process(data):
group_score = 0
garbage_count = 0
stack =[]
garbage = False
ignore = False
for c in data:
if ignore:
ignore = False
continue
if c == '{' and not garbage:
stack.append(len(stack) + 1)
elif c == '}' and not garbage:
group_score += stack.pop()
elif c == '<' and not garbage:
garbage = True
elif c == '>' and garbage:
garbage = False
elif c == '!':
ignore = True
elif garbage:
garbage_count += 1
assert len(stack) == 0
return group_score, garbage_count
if __name__ == '__main__':
assert process('<>') == (0, 0)
assert process('<random characters>') == (0, 17)
assert process('<<<<>') == (0, 3)
assert process('<{!>}>') == (0, 2)
assert process('<!!>') == (0, 0)
assert process('<!!!>>') == (0, 0)
assert process('<{o"i!a,<{i<a>') == (0, 10)
assert process('{}') == (1, 0)
assert process('{{{}}}') == (6, 0)
assert process('{{{},{},{{}}}}') == (16, 0)
assert process('{<a>,<a>,<a>,<a>}') == (1, 4)
assert process('{{<a>},{<a>},{<a>},{<a>}}') == (9, 4)
assert process('{{<!>},{<!>},{<!>},{<a>}}') == (3, 13)
assert process('{{<!!>},{<!!>},{<!!>},{<!!>}}') == (9, 0)
with open('day9.txt', 'r') as f:
data = f.read()
print(process(data))
| [
"[email protected]"
] | |
34974698db983346e41e782fa77394e2c568893b | b4874cbd7299492277ad28441bad05e6348307f2 | /dummies/zerg/zerg_random.py | b950007a6b9104c741a3ee370c8d3c20e56d79a4 | [
"MIT"
] | permissive | MadManSC2/sharpy-sc2 | 7d405578413c7a8f8fc1e4030ad719d7fe5df10a | 13950357df2db58033daab24f076e3ae83f0b2a8 | refs/heads/master | 2021-01-05T03:38:58.038563 | 2020-03-07T20:35:24 | 2020-03-07T20:35:24 | 240,865,466 | 1 | 0 | MIT | 2020-02-16T09:38:05 | 2020-02-16T09:38:04 | null | UTF-8 | Python | false | false | 400 | py | import random
val = random.randint(0, 5)
if val == 0:
from .lings import LadderBot
elif val == 1:
from .macro_roach import LadderBot
elif val == 2:
from .macro_zerg_v2 import LadderBot
elif val == 3:
from .mutalisk import LadderBot
elif val == 4:
from .roach_hydra import LadderBot
elif val == 5:
from .twelve_pool import LadderBot
class RandomZergBot(LadderBot):
pass
| [
"[email protected]"
] | |
ace60c9d9b2a94871157f75b667861d20615f347 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nncerberu.py | a13c5cb174a2f251e38dad6efed5b572a5b2cd0e | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 277 | py | ii = [('SadlMLP.py', 1), ('PettTHE.py', 2), ('ClarGE2.py', 3), ('GellWPT2.py', 1), ('CarlTFR.py', 1), ('RoscTTI3.py', 1), ('AinsWRR3.py', 1), ('CookGHP2.py', 1), ('CoolWHM.py', 1), ('WestJIT2.py', 1), ('MedwTAI.py', 2), ('GodwWLN.py', 2), ('MedwTAI2.py', 1), ('BentJRP.py', 1)] | [
"[email protected]"
] | |
8c4eecccb5304b7457c69b24b8d130d4c73a3c7f | 1680edad321979cdf9f655ace5533f67c4ae6589 | /client_support/client_support/doctype/email/email.py | 8ab950a133bf6e6b679ac6897b073e95300c934b | [] | no_license | ssindham/Client_Support | 18a28bd6f55807b1c07ff233a839a2207f039874 | 1fc59526f27ead426f5ce9ac8f582e5441b05410 | refs/heads/master | 2021-06-18T11:27:33.218878 | 2017-06-26T12:59:42 | 2017-06-26T12:59:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Makarand Bauskar and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class email(Document):
pass
| [
"[email protected]"
] | |
8c1c78e9cfc2ae9d9094e296aac2b89167f3d58d | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-lts/huaweicloudsdklts/v2/model/createfavorite_request.py | 2cf325fae708d647ca8861334b066288102c28be | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 3,135 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CreatefavoriteRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'body': 'CreatefavoriteReqbody'
}
attribute_map = {
'body': 'body'
}
def __init__(self, body=None):
"""CreatefavoriteRequest
The model defined in huaweicloud sdk
:param body: Body of the CreatefavoriteRequest
:type body: :class:`huaweicloudsdklts.v2.CreatefavoriteReqbody`
"""
self._body = None
self.discriminator = None
if body is not None:
self.body = body
@property
def body(self):
"""Gets the body of this CreatefavoriteRequest.
:return: The body of this CreatefavoriteRequest.
:rtype: :class:`huaweicloudsdklts.v2.CreatefavoriteReqbody`
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this CreatefavoriteRequest.
:param body: The body of this CreatefavoriteRequest.
:type body: :class:`huaweicloudsdklts.v2.CreatefavoriteReqbody`
"""
self._body = body
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreatefavoriteRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
b4cda9c4c5944bb55bd00d0c587319d667ec5e35 | 85fa329cadd8edb7aa8ad32d573a1da91445c676 | /RSVP_MVPA/MVPA_multi_methods/accs_multi_classifiers/do_MVPA_alltime_eeg.py | 610f5f953b3b7c06808c11dcda1f379a0783158c | [] | no_license | listenzcc/RSVP_scripts | 05aaed6d1aded2c3b1851ece61f52442c8a9eba8 | e01a60c980c2bf6a002f2673a5b8984d3ad70f6e | refs/heads/master | 2020-05-02T19:33:43.838999 | 2019-07-03T01:13:25 | 2019-07-03T01:13:25 | 178,161,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,947 | py | # coding: utf-8
'''
This script is to do MVPA on MEG RSVP dataset
'''
import matplotlib.pyplot as plt
import mne
import numpy as np
import os
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import MinMaxScaler
import time
import pdb
'''
# Function: Setting MVPA stuff.
# Output: cv, cross-validation maker.
# Output: pca_pipeline, pipeline of pca decomposition.
# Output: xdawn_pipeline, pipeline of xdawn filter.
# Output: clf_*, classifier of svm and lr.
'''
xdawn = mne.preprocessing.Xdawn(n_components=8)
cv = StratifiedKFold(n_splits=10, shuffle=True)
normalize_pipeline = make_pipeline(mne.decoding.Vectorizer(), MinMaxScaler())
clf_svm_rbf = svm.SVC(gamma='scale', kernel='rbf', class_weight='balanced', verbose=True)
clf_svm_linear = svm.SVC(gamma='scale', kernel='linear', class_weight='balanced', verbose=True)
clf_lr = LogisticRegression(class_weight='balanced', verbose=True)
def report_results(true_label, pred_label, title=None):
print(title)
report = classification_report(true_label, pred_label, target_names=['odd', 'norm'])
print(report)
if title is None:
return
with open(os.path.join(results_dir, '%s.txt' % title), 'w') as f:
f.writelines(report)
'''
# Function: Setting evrionment for the script.
# Output: root_path, directory of project.
# Output: time_stamp, string of beginning time of the script.
# Output: id_string, customer identifier string.
# Output: results_dir, directory for storing results.
'''
root_dir = os.path.join('/nfs/cell_a/userhome/zcc/documents/RSVP_experiment/')
time_stamp = time.strftime('%Y-%m-%d-%H-%M-%S')
id_string = 'RSVP_MEG'
results_dir = os.path.join(root_dir, 'RSVP_MVPA', 'MVPA_lr')
epochs_dir = os.path.join(root_dir, 'epochs_saver', 'epochs_freq_0.5_30_crop_n0.2_p1.1')
read_save_stuff = {}
read_save_stuff['S01'] = dict(
range_run = range(1, 11),
epochs_path = os.path.join(epochs_dir, 'eeg_S01_epochs_%d-epo.fif'),
report_path = os.path.join(results_dir, 'accs_eeg_S01.txt'))
read_save_stuff['S02'] = dict(
range_run = range(1, 11),
epochs_path = os.path.join(epochs_dir, 'eeg_S02_epochs_%d-epo.fif'),
report_path = os.path.join(results_dir, 'accs_eeg_S02.txt'))
for stuff in read_save_stuff.values():
print('-'*80)
for e in stuff.items():
print(e[0], e[1])
'''
# Function: Reading epochs.
'''
labels = None
epochs_data = None
epochs_list = []
for i in stuff['range_run']:
# Function: Reading epochs from -epo.fif.
epo_path = os.path.join(stuff['epochs_path'] % i)
epochs = mne.read_epochs(epo_path, verbose=True)
epochs.crop(tmin=0.0, tmax=1.0)
# Attention!!!
# This may cause poor alignment between epochs.
# But this is necessary for concatenate_epochs.
if epochs_list.__len__() != 0:
epochs.info['dev_head_t'] = epochs_list[0].info['dev_head_t']
epochs_list.append(epochs)
# Function: Preparing dataset for MVPA.
if labels is None:
labels = epochs.events[:, -1]
epochs_data = epochs.get_data()
else:
labels = np.concatenate([labels, epochs.events[:, -1]])
epochs_data = np.concatenate([epochs_data, epochs.get_data()], 0)
epochs = mne.epochs.concatenate_epochs(epochs_list)
'''
# Function: Repeat training and testing.
# Output:
'''
sfreq = epochs.info['sfreq']
w_length = int(sfreq * 0.1) # running classifier: window length
w_step = int(sfreq * 0.05) # running classifier: window step size
w_start = np.arange(0, epochs.get_data().shape[2] - w_length, w_step)
# init preds results.
preds_xdawn_svm_rbf = np.empty([len(labels), len(w_start)+1])
preds_xdawn_svm_linear = np.empty([len(labels), len(w_start)+1])
preds_xdawn_lr = np.empty([len(labels), len(w_start)+1])
for train, test in cv.split(epochs_data, labels):
print('-' * 80)
# xdawn
xdawn_data_train = xdawn.fit_transform(epochs[train])
xdawn_data_test = xdawn.transform(epochs[test])
data_train_ = xdawn_data_train[:, :, :]
data_test_ = xdawn_data_test[:, :, :]
# SVM rbf
clf_svm_rbf.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
preds_xdawn_svm_rbf[test, len(w_start)] = clf_svm_rbf.predict(normalize_pipeline.transform(data_test_))
# SVM linear
clf_svm_linear.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
preds_xdawn_svm_linear[test, len(w_start)] = clf_svm_linear.predict(normalize_pipeline.transform(data_test_))
# LR
clf_lr.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
preds_xdawn_lr[test, len(w_start)] = clf_lr.predict(normalize_pipeline.transform(data_test_))
for j, start in enumerate(w_start):
print(j, start)
# xdawn
data_train_ = xdawn_data_train[:, :, start:start+w_length]
data_test_ = xdawn_data_test[:, :, start:start+w_length]
# SVM rbf
clf_svm_rbf.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
preds_xdawn_svm_rbf[test, j] = clf_svm_rbf.predict(normalize_pipeline.transform(data_test_))
# SVM linear
clf_svm_linear.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
preds_xdawn_svm_linear[test, j] = clf_svm_linear.predict(normalize_pipeline.transform(data_test_))
# LR
clf_lr.fit(normalize_pipeline.fit_transform(data_train_), labels[train])
preds_xdawn_lr[test, j] = clf_lr.predict(normalize_pipeline.transform(data_test_))
'''
# Function: Save report into file.
'''
fpath = os.path.join(stuff['report_path'])
with open(fpath, 'w') as f:
report_svm_rbf = classification_report(preds_xdawn_svm_rbf[:, len(w_start)], labels, target_names=['odd', 'norm'])
print(report_svm_rbf)
f.writelines('\n[all_SVM_rbf]\n')
f.writelines(report_svm_rbf)
report_svm_linear = classification_report(preds_xdawn_svm_linear[:, len(w_start)], labels, target_names=['odd', 'norm'])
print(report_svm_linear)
f.writelines('\n[all_SVM_linear]\n')
f.writelines(report_svm_linear)
report_lr = classification_report(preds_xdawn_lr[:, len(w_start)], labels, target_names=['odd', 'norm'])
print(report_lr)
f.writelines('\n[all_LR]\n')
f.writelines(report_lr)
for j, start in enumerate(w_start):
print(j)
report_svm_rbf = classification_report(preds_xdawn_svm_rbf[:, j], labels, target_names=['odd', 'norm'])
with open(fpath, 'a') as f:
print(report_svm_rbf)
f.writelines('\n[%d-%d, %f, %f, SVM_rbf]\n' % (start, start+w_length, epochs.times[start], epochs.times[start+w_length]))
f.writelines(report_svm_rbf)
report_svm_linear = classification_report(preds_xdawn_svm_linear[:, j], labels, target_names=['odd', 'norm'])
with open(fpath, 'a') as f:
print(report_svm_linear)
f.writelines('\n[%d-%d, %f, %f, SVM_linear]\n' % (start, start+w_length, epochs.times[start], epochs.times[start+w_length]))
f.writelines(report_svm_linear)
report_lr = classification_report(preds_xdawn_lr[:, j], labels, target_names=['odd', 'norm'])
with open(fpath, 'a') as f:
print(report_lr)
f.writelines('\n[%d-%d, %f, %f, LR]\n' % (start, start+w_length, epochs.times[start], epochs.times[start+w_length]))
f.writelines(report_lr)
| [
"[email protected]"
] | |
007ce13e8ee357d1d8bf7b8c6463e0e1c51061a8 | 2ecfe49cd576fbf0cf88d64020bd3b321866e7ed | /src/azure-cli-core/azure/cli/core/aaz/_field_type.py | 9cc5ec8835588ca50d0b538f696b3663e0bfac31 | [
"MIT",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MPL-2.0",
"LGPL-2.1-only",
"Apache-2.0",
"LGPL-2.1-or-later",
"BSD-2-Clause"
] | permissive | c-ryan-k/azure-cli | ebd3d2347503a540bdc99f6d93d8ae1eda454c52 | fa2a2d8ea269b43d8c397f9bd5c43bfc97258876 | refs/heads/dev | 2022-10-26T03:16:07.347991 | 2022-10-14T07:06:13 | 2022-10-14T07:06:13 | 148,374,111 | 0 | 0 | MIT | 2021-12-07T17:49:19 | 2018-09-11T20:09:42 | Python | UTF-8 | Python | false | false | 12,116 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import OrderedDict
from ._base import AAZBaseType, AAZValuePatch, AAZUndefined
from ._field_value import AAZObject, AAZDict, AAZList, AAZSimpleValue
from .exceptions import AAZUnknownFieldError, AAZConflictFieldDefinitionError, AAZValuePrecisionLossError, \
AAZInvalidFieldError
# pylint: disable=protected-access, too-few-public-methods, isinstance-second-argument-not-valid-type
# pylint: disable=too-many-instance-attributes
# build in types
class AAZSimpleType(AAZBaseType):
"""Simple value type"""
DataType = None
_ValueCls = AAZSimpleValue
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def process_data(self, data, **kwargs):
if data == None: # noqa: E711, pylint: disable=singleton-comparison
# data can be None or AAZSimpleValue == None
if self._nullable:
return None
return AAZValuePatch.build(self)
if isinstance(data, AAZSimpleValue):
if data._is_patch:
# return value patch
return AAZValuePatch.build(self)
data = data._data
assert self.DataType is not None and isinstance(data, self.DataType), \
f'Expect {self.DataType}, got {data} ({type(data)}'
return data
class AAZIntType(AAZSimpleType):
DataType = int
class AAZStrType(AAZSimpleType):
DataType = str
class AAZBoolType(AAZSimpleType):
DataType = bool
class AAZFloatType(AAZSimpleType):
DataType = float
def process_data(self, data, **kwargs):
if data == None: # noqa: E711, pylint: disable=singleton-comparison
# data can be None or AAZSimpleValue == None
if self._nullable:
return None
return AAZValuePatch.build(self)
if isinstance(data, AAZSimpleValue):
if data._is_patch:
# return value patch
return AAZValuePatch.build(self)
data = data._data
if isinstance(data, int):
# transform int to float
if float(data) != data:
raise AAZValuePrecisionLossError(data, float(data))
data = float(data)
assert isinstance(data, self.DataType), f'Expect {self.DataType}, got {data} ({type(data)}'
return data
# compound types
class AAZObjectType(AAZBaseType):
"""Object value type"""
_PROTECTED_KEYWORDS = (
"get_attr_name",
"process_data",
"to_serialized_data",
"discriminate_by",
"get_discriminator"
) # these keywords are used in AAZObjectValue and AAZObjectType, object should not use them as a field name.
_ValueCls = AAZObject
_PatchDataCls = dict
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# It's important to keep the order of fields.
# This feature can help to resolve arguments interdependent problem.
# aaz-dev should register fields based on the interdependent order.
self._fields = OrderedDict()
self._fields_alias_map = {} # key is the option, value is field
# Polymorphism support
self._discriminator_field_name = None
self._discriminators = OrderedDict()
def __getitem__(self, key):
name = self.get_attr_name(key)
if name not in self._fields:
# must raise AttributeError to support hasattr check
raise AAZUnknownFieldError(self, key)
return self._fields[name]
def __setitem__(self, key, value):
assert not key.startswith('_')
assert key not in self._PROTECTED_KEYWORDS
if not isinstance(value, AAZBaseType):
raise AAZInvalidFieldError(self, key, f"unknown field type {type(value)}")
if hasattr(self, key):
# key should not be defined before
raise AAZConflictFieldDefinitionError(
self, key, "Key already been defined before")
name = key
value._name = name
self._fields[name] = value
# update alias map
aliases = [*value._options] if value._options else []
if value._serialized_name:
aliases.append(value._serialized_name)
for alias in aliases:
if alias == name:
continue
assert not alias.startswith('_')
assert alias not in self._PROTECTED_KEYWORDS
if alias in self._fields_alias_map and self._fields_alias_map[alias] != name:
raise AAZConflictFieldDefinitionError(
self, name, f"Alias is already used by other field: {self._fields_alias_map[alias]}")
self._fields_alias_map[alias] = name
def __getattr__(self, key):
return self[key]
def __setattr__(self, key, value):
if key.startswith('_'):
assert not isinstance(value, AAZBaseType)
self.__dict__[key] = value
else:
self[key] = value
def get_attr_name(self, key):
if key in self._fields:
return key
if key in self._fields_alias_map:
return self._fields_alias_map[key]
return None
def process_data(self, data, **kwargs):
if data == None: # noqa: E711, pylint: disable=singleton-comparison
# data can be None or AAZSimpleValue == None
if self._nullable:
return None
return AAZValuePatch.build(self)
if isinstance(data, AAZObject) and data._is_patch:
# use value patch
result = AAZValuePatch.build(self)
else:
result = {}
value = AAZObject(schema=self, data=result)
if isinstance(data, AAZObject):
if self._discriminator_field_name:
# assign discriminator field first
for key in data._data.keys():
name = self.get_attr_name(key)
if name == self._discriminator_field_name:
value[name] = data[key]
break
for key in data._data.keys():
if not hasattr(value, key):
# ignore undefined key
continue
value[key] = data[key]
else:
assert isinstance(data, (dict,))
if self._discriminator_field_name:
# assign discriminator field first
for key, sub_data in data.items():
name = self.get_attr_name(key)
if name == self._discriminator_field_name:
value[name] = sub_data
break
for key, sub_data in data.items():
if not hasattr(value, key):
# ignore undefined key
continue
value[key] = sub_data
return result
# Polymorphism support
def discriminate_by(self, key, data, schema=None):
name = self.get_attr_name(key)
if name not in self._fields:
raise AAZUnknownFieldError(self, key)
field = self._fields[name]
if not isinstance(field, AAZStrType):
raise AAZInvalidFieldError(self, name, f"Invalid discriminator field type: {type(field)}")
data = field.process_data(data)
if self._discriminator_field_name is None:
self._discriminator_field_name = name
elif self._discriminator_field_name != name:
raise AAZConflictFieldDefinitionError(
self, name, f"Conflict discriminator field name with: {self._discriminator_field_name}")
schema = schema or AAZObjectType() # use provided schema or create a object type
assert isinstance(schema, AAZObjectType)
return self._discriminators.setdefault(data, schema)
def get_discriminator(self, data):
if self._discriminator_field_name is None:
return None
if data == AAZUndefined or not data:
return None
if isinstance(data, AAZObject):
data = data._data
assert isinstance(data, dict)
for key, field_data in data.items():
name = self.get_attr_name(key)
if name == self._discriminator_field_name:
return self._discriminators.get(field_data, None)
return None
class AAZDictType(AAZBaseType):
"""Dict value type"""
_ValueCls = AAZDict
_PatchDataCls = dict
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._element = None
@property
def Element(self):
if self._element is None:
raise AAZUnknownFieldError(self, "Element")
return self._element
@Element.setter
def Element(self, value):
if self._element is None:
assert isinstance(value, AAZBaseType)
self._element = value
assert self._element._name is None
assert not self._element._options
assert self._element._serialized_name is None
elif self._element != value:
raise AAZConflictFieldDefinitionError(self, "Element", "Redefine element in different schema")
def __getitem__(self, key):
return self.Element
def process_data(self, data, **kwargs):
if data == None: # noqa: E711, pylint: disable=singleton-comparison
# data can be None or AAZSimpleValue == None
if self._nullable:
return None
return AAZValuePatch.build(self)
if isinstance(data, AAZDict) and data._is_patch:
# use value patch
result = AAZValuePatch.build(self)
else:
result = {}
value = AAZDict(schema=self, data=result)
if isinstance(data, AAZDict):
for key in data._data.keys():
value[key] = data[key]
else:
assert isinstance(data, (dict,))
for key, sub_data in data.items():
value[key] = sub_data
return result
class AAZListType(AAZBaseType):
"""List value type"""
_ValueCls = AAZList
_PatchDataCls = dict
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._element = None
@property
def Element(self):
if self._element is None:
raise AAZUnknownFieldError(self, "Element")
return self._element
@Element.setter
def Element(self, value):
if self._element is None:
assert isinstance(value, AAZBaseType)
self._element = value
assert self._element._name is None
assert not self._element._options
assert self._element._serialized_name is None
elif self._element != value:
raise AAZConflictFieldDefinitionError(self, "Element", "Redefine element in different schema")
def __getitem__(self, key):
return self.Element
def process_data(self, data, **kwargs):
if data == None: # noqa: E711, pylint: disable=singleton-comparison
# data can be None or AAZSimpleValue == None
if self._nullable:
return None
return AAZValuePatch.build(self)
if isinstance(data, AAZList) and data._is_patch:
# use value patch
result = AAZValuePatch.build(self)
else:
result = {}
value = AAZList(schema=self, data=result)
if isinstance(data, AAZList):
for idx in data._data.keys():
value[idx] = data[idx]
else:
assert isinstance(data, list)
for idx, sub_data in enumerate(data):
value[idx] = sub_data
return result
| [
"[email protected]"
] | |
e2aa1c6699efd5f2501f3a550014dce289e3e328 | 445b158bd10c79e19a679264745add3b3353dea3 | /linux/bin/django-admin | c97656a5f6288659744de8ee0c98cf01c2083159 | [] | no_license | Carlosdher/topicos_especiasi | 27e523830408b49e852c8c03fc4d0c6ecb14f5e9 | 86df42ea4b514fe9159d83a44ed9cd7a9544ca96 | refs/heads/master | 2020-03-30T18:48:38.385266 | 2018-10-04T04:57:30 | 2018-10-04T04:57:30 | 151,515,273 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 317 | #!/home/ifpb/topicos/AndrmedAnime/andromedanimes/linux/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
] | ||
124fc4b718a9dab6fff8c0385fd2df121008dbe7 | 649bd422025e421d86025743eac324c9b882a2e8 | /exam/1_three-dimensional_atomic_system/dump/phasetrans/temp63_6000.py | ab89496808490d398ea518254730037f229a6d94 | [] | no_license | scheuclu/atom_class | 36ddee1f6a5995872e858add151c5942c109847c | 0c9a8c63d9b38898c1869fe8983126cef17662cd | refs/heads/master | 2021-01-21T10:52:28.448221 | 2017-03-07T23:04:41 | 2017-03-07T23:04:41 | 83,489,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 68,855 | py | ITEM: TIMESTEP
6000
ITEM: NUMBER OF ATOMS
2048
ITEM: BOX BOUNDS pp pp pp
5.6030138703177812e-01 4.6639698612962775e+01
5.6030138703177812e-01 4.6639698612962775e+01
5.6030138703177812e-01 4.6639698612962775e+01
ITEM: ATOMS id type xs ys zs
8 1 0.12444 0.0623981 0.061891
35 1 0.0582605 0.124975 0.0550047
130 1 0.0637287 0.0668646 0.121203
165 1 0.120874 0.130261 0.13156
4 1 0.00150043 0.0623079 0.0564314
161 1 0.998473 0.124009 0.121724
1419 1 0.316802 0.494168 0.442347
1409 1 0.00502467 0.498251 0.369802
12 1 0.245873 0.0567546 0.0556125
39 1 0.186111 0.12009 0.0577984
43 1 0.315752 0.121246 0.0547671
134 1 0.181885 0.0618589 0.11715
138 1 0.314531 0.0587031 0.118126
169 1 0.253027 0.111277 0.117433
1165 1 0.371531 0.496451 0.131497
1289 1 0.250231 0.496989 0.250986
1311 1 0.939074 0.498062 0.308301
149 1 0.627768 0.00353126 0.119736
16 1 0.379071 0.062049 0.0604854
47 1 0.433664 0.120636 0.060287
142 1 0.4346 0.0617573 0.125274
173 1 0.37898 0.127896 0.125631
177 1 0.501266 0.129039 0.127384
20 1 0.494725 0.0626016 0.0646274
279 1 0.684118 0.0058775 0.315818
1281 1 0.00242533 0.494629 0.247383
1545 1 0.244517 0.490052 0.497234
102 1 0.188484 0.432178 7.39043e-05
24 1 0.627853 0.0641352 0.0581492
51 1 0.56059 0.118334 0.0564849
146 1 0.561021 0.0592402 0.128393
181 1 0.618552 0.121827 0.124482
257 1 0.00192584 0.00698874 0.247346
630 1 0.686554 0.447899 0.49941
28 1 0.75173 0.0625279 0.061638
55 1 0.687014 0.124842 0.062515
59 1 0.81207 0.121475 0.059052
150 1 0.68505 0.0696414 0.122178
154 1 0.812714 0.065107 0.124535
185 1 0.744416 0.123448 0.119306
30 1 0.93499 0.0552616 0.00229402
533 1 0.624947 0.00283983 0.493698
287 1 0.94157 0.00980448 0.321209
32 1 0.869998 0.0643414 0.0674393
63 1 0.93657 0.12139 0.060028
158 1 0.938076 0.0641834 0.123701
189 1 0.874876 0.125128 0.12009
40 1 0.124932 0.182981 0.0660949
67 1 0.0623293 0.244783 0.0602175
72 1 0.116711 0.308923 0.0678913
162 1 0.0529438 0.183079 0.116554
194 1 0.0604205 0.305847 0.126659
197 1 0.126413 0.248221 0.124832
193 1 0.00543885 0.248064 0.125848
36 1 0.994836 0.181134 0.0613387
82 1 0.56249 0.309075 0.00381656
1293 1 0.369085 0.494235 0.25695
44 1 0.254925 0.182219 0.0605848
71 1 0.18936 0.243228 0.0626162
75 1 0.316341 0.248126 0.0588503
76 1 0.245747 0.30404 0.0572344
166 1 0.190907 0.180401 0.128749
170 1 0.315466 0.190222 0.120726
198 1 0.186839 0.307483 0.121779
201 1 0.243897 0.244643 0.121681
202 1 0.303934 0.311076 0.127861
19 1 0.566983 0.00147081 0.0622447
93 1 0.871216 0.244848 0.0019519
48 1 0.37444 0.181925 0.0604571
79 1 0.438714 0.248176 0.0622703
80 1 0.378783 0.308904 0.0561765
174 1 0.445005 0.193267 0.118931
205 1 0.374799 0.254764 0.121374
206 1 0.436832 0.311928 0.121523
52 1 0.504048 0.184841 0.0580631
261 1 0.121109 0.0067517 0.249282
209 1 0.499502 0.252733 0.125259
573 1 0.874839 0.122131 0.497248
159 1 0.940243 0.0012741 0.18553
84 1 0.495512 0.311129 0.0592797
56 1 0.621307 0.180774 0.0600112
83 1 0.558022 0.254476 0.0667992
88 1 0.625761 0.313512 0.061136
178 1 0.564261 0.183735 0.119387
210 1 0.55766 0.3187 0.121252
213 1 0.613296 0.257227 0.125771
514 1 0.0544174 0.068975 0.494004
60 1 0.75061 0.186879 0.0601226
87 1 0.688621 0.249011 0.0613499
91 1 0.808881 0.248091 0.0609563
92 1 0.752927 0.310201 0.0609056
182 1 0.687777 0.193295 0.127824
186 1 0.812065 0.182052 0.122319
214 1 0.692651 0.314717 0.122215
217 1 0.753712 0.244203 0.128096
218 1 0.814024 0.314768 0.128422
1049 1 0.746431 0.497658 -0.000122115
1291 1 0.310158 0.492075 0.318194
117 1 0.618512 0.378133 0.00362121
269 1 0.377331 0.00499927 0.251074
68 1 0.997112 0.307795 0.0610285
64 1 0.870251 0.178727 0.0634613
95 1 0.93934 0.253194 0.0621683
96 1 0.874108 0.31198 0.0703334
190 1 0.939109 0.184185 0.122414
221 1 0.871664 0.246589 0.11867
222 1 0.94061 0.310371 0.124645
139 1 0.31857 0.00620801 0.186928
126 1 0.939043 0.432363 0.00471697
147 1 0.555921 0.00205978 0.196154
99 1 0.0659022 0.375363 0.0615476
104 1 0.126027 0.433835 0.0624286
226 1 0.0667914 0.434121 0.119746
229 1 0.126585 0.369837 0.124511
103 1 0.188564 0.373642 0.0601006
107 1 0.315928 0.366937 0.0650747
108 1 0.251499 0.437046 0.062056
230 1 0.184284 0.433836 0.122203
233 1 0.248584 0.374804 0.126538
234 1 0.314474 0.432212 0.122664
1153 1 0.00384226 0.493836 0.125641
285 1 0.875928 0.00751488 0.244695
111 1 0.43165 0.371138 0.0544634
112 1 0.376831 0.431958 0.0580801
237 1 0.37571 0.371461 0.121162
238 1 0.433202 0.434106 0.121666
116 1 0.500842 0.440782 0.0561614
1287 1 0.190427 0.495602 0.317893
131 1 0.062427 0.00464141 0.185171
391 1 0.189515 0.0074647 0.442813
241 1 0.496481 0.374764 0.126585
115 1 0.560723 0.375641 0.0595864
120 1 0.625239 0.439463 0.0640738
242 1 0.564255 0.446146 0.11483
245 1 0.623342 0.367325 0.120099
1423 1 0.431321 0.497559 0.436616
141 1 0.375363 0.00318121 0.124731
1029 1 0.123438 0.498595 0.00256369
119 1 0.687774 0.370958 0.066719
123 1 0.810242 0.374765 0.05756
124 1 0.748622 0.43772 0.0638195
246 1 0.683611 0.434867 0.123975
249 1 0.753557 0.375845 0.124202
250 1 0.811365 0.442979 0.126877
582 1 0.184043 0.312941 0.498343
1417 1 0.251025 0.498897 0.380256
153 1 0.751205 0.0101551 0.13018
225 1 0.00642863 0.372448 0.11966
100 1 0.00503392 0.434521 0.0640775
512 1 0.868833 0.433645 0.434769
127 1 0.932285 0.375985 0.0605551
128 1 0.877117 0.44217 0.0679365
253 1 0.874816 0.379915 0.126953
254 1 0.940326 0.432307 0.129342
1161 1 0.258653 0.493428 0.119248
511 1 0.935031 0.373775 0.437991
510 1 0.935481 0.434714 0.381527
136 1 0.128351 0.0668093 0.186258
163 1 0.0603032 0.125027 0.187071
258 1 0.0613919 0.0677561 0.245331
264 1 0.12639 0.0644282 0.321252
291 1 0.0647405 0.115584 0.316336
293 1 0.127521 0.118245 0.25337
260 1 0.9988 0.0657708 0.31196
289 1 0.997191 0.12503 0.252149
509 1 0.875017 0.371823 0.374603
140 1 0.250065 0.056535 0.188988
167 1 0.18624 0.124634 0.190371
171 1 0.313155 0.125548 0.178419
262 1 0.195287 0.061309 0.253147
266 1 0.311552 0.0647463 0.245302
268 1 0.254319 0.0648983 0.306644
295 1 0.184177 0.129553 0.319684
297 1 0.248875 0.124893 0.245777
299 1 0.31269 0.133819 0.314622
281 1 0.747411 0.00615523 0.249701
122 1 0.81587 0.43631 0.00205621
11 1 0.316494 0.000368865 0.0597195
144 1 0.377209 0.0631702 0.187318
175 1 0.440941 0.122578 0.187599
270 1 0.44079 0.0644984 0.248009
272 1 0.373944 0.0711766 0.311521
301 1 0.371976 0.126516 0.25026
303 1 0.44059 0.129954 0.313722
305 1 0.503644 0.132695 0.250809
148 1 0.503642 0.0668217 0.18498
276 1 0.494581 0.0664909 0.310818
395 1 0.307748 0.00521505 0.438444
389 1 0.117186 0.00109335 0.378522
152 1 0.618764 0.0581184 0.190937
179 1 0.561057 0.129651 0.192037
274 1 0.554206 0.059914 0.253118
280 1 0.618895 0.0622086 0.305785
307 1 0.560907 0.125471 0.309987
309 1 0.61979 0.130036 0.242979
1163 1 0.309963 0.488663 0.188085
156 1 0.746549 0.0680438 0.18975
183 1 0.679605 0.124373 0.185008
187 1 0.815338 0.126515 0.186994
278 1 0.680592 0.0634973 0.245443
282 1 0.805421 0.0682478 0.252107
284 1 0.745323 0.0667921 0.311508
311 1 0.681442 0.122943 0.304615
313 1 0.745665 0.124041 0.248831
315 1 0.811879 0.126828 0.310858
132 1 0.00193496 0.0651574 0.184431
160 1 0.881031 0.0637261 0.185875
191 1 0.933323 0.125352 0.186282
286 1 0.939187 0.0650011 0.244991
288 1 0.880459 0.0609115 0.305572
317 1 0.879185 0.125747 0.247334
319 1 0.934499 0.126273 0.313187
168 1 0.121649 0.181037 0.19502
195 1 0.0664202 0.242044 0.185993
200 1 0.12026 0.307888 0.189772
290 1 0.0659051 0.187853 0.260796
296 1 0.125702 0.187015 0.319162
322 1 0.0576213 0.310725 0.248726
323 1 0.0626808 0.247646 0.318305
325 1 0.121999 0.251153 0.250979
328 1 0.119592 0.307601 0.319147
292 1 0.00276911 0.187753 0.315415
321 1 0.995314 0.250678 0.256343
196 1 0.998418 0.30588 0.189931
324 1 0.000800636 0.312478 0.312602
172 1 0.253381 0.18877 0.18438
199 1 0.188455 0.245422 0.194735
203 1 0.312297 0.249199 0.186414
204 1 0.252776 0.310831 0.1943
294 1 0.183491 0.182516 0.25265
298 1 0.31213 0.185946 0.250578
300 1 0.246499 0.182912 0.31421
326 1 0.189475 0.306251 0.255678
327 1 0.185366 0.24987 0.315103
329 1 0.254425 0.245934 0.257435
330 1 0.318393 0.309734 0.249545
331 1 0.320163 0.24545 0.311885
332 1 0.24738 0.310016 0.314061
176 1 0.37407 0.185073 0.184165
207 1 0.430468 0.246012 0.18765
208 1 0.376042 0.309395 0.185961
302 1 0.440361 0.194209 0.246078
304 1 0.376458 0.192566 0.318665
333 1 0.376207 0.248227 0.247971
334 1 0.436682 0.305116 0.24854
335 1 0.442074 0.249243 0.311545
336 1 0.37591 0.309035 0.308337
180 1 0.496848 0.188441 0.183941
308 1 0.500932 0.188948 0.314173
337 1 0.500948 0.255313 0.24838
340 1 0.49917 0.317923 0.316992
212 1 0.501505 0.313436 0.185973
184 1 0.6144 0.189199 0.185243
211 1 0.562174 0.257311 0.189646
216 1 0.623713 0.32494 0.184546
306 1 0.563701 0.193162 0.251304
312 1 0.626952 0.19685 0.309254
338 1 0.558538 0.310986 0.257698
339 1 0.558477 0.250305 0.314667
341 1 0.625099 0.257572 0.249025
344 1 0.62216 0.308397 0.315418
188 1 0.754928 0.182176 0.182186
215 1 0.682245 0.252509 0.18578
219 1 0.813089 0.244379 0.186235
220 1 0.750776 0.308994 0.195238
310 1 0.678676 0.185722 0.236526
314 1 0.80589 0.18464 0.246824
316 1 0.743222 0.189648 0.319365
342 1 0.681006 0.320397 0.253116
343 1 0.681721 0.253965 0.318711
345 1 0.747936 0.249704 0.25323
346 1 0.8182 0.310101 0.252973
347 1 0.808521 0.244513 0.314192
348 1 0.745221 0.309919 0.313201
164 1 0.00100896 0.18818 0.18785
192 1 0.874179 0.187121 0.194343
223 1 0.940696 0.249546 0.185458
224 1 0.879795 0.306834 0.190982
318 1 0.936149 0.184997 0.252834
320 1 0.873795 0.187984 0.311061
349 1 0.873455 0.243748 0.24802
350 1 0.939298 0.316446 0.249371
351 1 0.937293 0.251381 0.31716
352 1 0.879101 0.316271 0.305899
503 1 0.682156 0.376911 0.435022
227 1 0.0602298 0.369516 0.182865
232 1 0.120866 0.434274 0.182755
354 1 0.0688913 0.438487 0.247299
355 1 0.0673878 0.37337 0.314921
357 1 0.130634 0.372501 0.247406
360 1 0.130364 0.437845 0.313141
507 1 0.81333 0.376842 0.432422
155 1 0.811898 0.00537828 0.194512
231 1 0.190495 0.369394 0.186047
235 1 0.317449 0.374899 0.190283
236 1 0.254229 0.428868 0.187182
358 1 0.18853 0.435185 0.244189
359 1 0.189102 0.369096 0.31545
361 1 0.255387 0.368851 0.249213
362 1 0.304908 0.433021 0.254531
363 1 0.308354 0.369056 0.317782
364 1 0.248147 0.429319 0.318772
505 1 0.738988 0.374047 0.370555
239 1 0.436587 0.374729 0.188723
240 1 0.375899 0.43356 0.18493
365 1 0.372252 0.373906 0.248887
366 1 0.432234 0.441168 0.247369
367 1 0.43101 0.375493 0.316385
368 1 0.36744 0.431842 0.31145
506 1 0.808389 0.431991 0.369435
502 1 0.686853 0.436648 0.383136
372 1 0.501625 0.442341 0.315735
369 1 0.492893 0.378779 0.251875
243 1 0.558753 0.382278 0.182978
248 1 0.622358 0.437779 0.178565
370 1 0.553013 0.43727 0.245957
371 1 0.561257 0.38522 0.312967
373 1 0.61781 0.379203 0.247691
376 1 0.619972 0.450318 0.312231
244 1 0.49896 0.43837 0.18168
508 1 0.752994 0.436469 0.433379
283 1 0.811533 0.000956246 0.30587
605 1 0.864435 0.247899 0.497068
247 1 0.691506 0.382015 0.185771
251 1 0.807817 0.369892 0.18736
252 1 0.754932 0.442059 0.192511
374 1 0.696598 0.439153 0.248639
375 1 0.678511 0.379228 0.31032
377 1 0.748349 0.378053 0.257981
378 1 0.813477 0.444308 0.255524
379 1 0.810524 0.368976 0.310111
380 1 0.738938 0.440148 0.316239
228 1 0.00647611 0.429582 0.183391
353 1 0.00529142 0.373426 0.250804
356 1 0.00811397 0.431255 0.309381
255 1 0.939981 0.376628 0.185023
256 1 0.874615 0.43472 0.194412
381 1 0.86872 0.374525 0.249605
382 1 0.942182 0.434067 0.252425
383 1 0.938593 0.37427 0.316069
384 1 0.871745 0.437393 0.315704
85 1 0.622551 0.244384 0.00510188
1177 1 0.743355 0.498273 0.127474
151 1 0.683633 0.00657384 0.184983
94 1 0.932396 0.310067 0.00132938
386 1 0.0621343 0.0627282 0.380937
392 1 0.120411 0.0579251 0.444451
419 1 0.0656636 0.125532 0.438521
421 1 0.123625 0.127156 0.38484
417 1 0.998289 0.127986 0.377899
388 1 0.994837 0.0688602 0.435415
97 1 0.996939 0.371978 0.00520295
129 1 0.00286957 0.00200078 0.127994
538 1 0.815806 0.0590544 0.499699
1413 1 0.121546 0.494852 0.381641
390 1 0.182633 0.063502 0.380835
394 1 0.308809 0.0663321 0.372069
396 1 0.251437 0.0676016 0.441874
423 1 0.182659 0.125479 0.44782
425 1 0.242458 0.123128 0.380639
427 1 0.320475 0.123522 0.437022
1157 1 0.123094 0.494362 0.116926
259 1 0.0597156 0.000554338 0.309493
1055 1 0.941311 0.500144 0.0646553
542 1 0.928595 0.0607122 0.495697
398 1 0.433597 0.0620397 0.37234
400 1 0.379086 0.059928 0.434181
429 1 0.375368 0.125047 0.373871
431 1 0.437105 0.121677 0.432369
433 1 0.49927 0.132178 0.375154
267 1 0.311192 0.00143111 0.307004
501 1 0.622754 0.379602 0.376118
637 1 0.873593 0.377128 0.495968
404 1 0.507486 0.0596136 0.43198
402 1 0.559588 0.0655147 0.367408
408 1 0.620938 0.0645865 0.437132
435 1 0.566976 0.124373 0.435019
437 1 0.627357 0.125957 0.370126
1425 1 0.497513 0.496265 0.379343
500 1 0.495979 0.436968 0.439597
497 1 0.4986 0.375586 0.374112
406 1 0.680431 0.064893 0.375455
410 1 0.813466 0.066913 0.369796
412 1 0.746219 0.0641469 0.436204
439 1 0.684188 0.122668 0.432937
441 1 0.751506 0.129841 0.38025
443 1 0.812224 0.125832 0.438443
145 1 0.501485 0.0012944 0.124551
118 1 0.682878 0.440805 0.00358665
414 1 0.937454 0.0677113 0.378865
416 1 0.870251 0.06408 0.430585
445 1 0.872395 0.128765 0.37346
447 1 0.933686 0.131211 0.438515
493 1 0.36702 0.375001 0.380735
418 1 0.0578992 0.186595 0.380891
424 1 0.126038 0.188297 0.445447
450 1 0.0568842 0.313959 0.381396
451 1 0.0571858 0.24757 0.439139
453 1 0.120478 0.246213 0.38311
456 1 0.119389 0.307509 0.444927
452 1 0.995936 0.309039 0.440013
449 1 0.998292 0.250715 0.37323
420 1 0.997989 0.192924 0.443361
614 1 0.180535 0.438235 0.498037
1167 1 0.436478 0.497842 0.188967
422 1 0.192053 0.196281 0.380219
426 1 0.303378 0.187835 0.375432
428 1 0.245274 0.181146 0.444729
454 1 0.188645 0.310255 0.385747
455 1 0.192425 0.245147 0.443068
457 1 0.249161 0.250216 0.377028
458 1 0.312027 0.307572 0.381562
459 1 0.312024 0.242051 0.443471
460 1 0.254331 0.307783 0.444231
1159 1 0.192478 0.495946 0.18447
1439 1 0.934467 0.496458 0.437318
430 1 0.434064 0.189148 0.381935
432 1 0.374352 0.185036 0.437976
461 1 0.372427 0.252246 0.384187
462 1 0.43398 0.317164 0.372243
463 1 0.44084 0.253681 0.434746
464 1 0.373221 0.311997 0.443323
436 1 0.505743 0.19481 0.437452
1181 1 0.879514 0.495624 0.133172
397 1 0.371087 0.00130378 0.371262
468 1 0.498774 0.314845 0.443232
465 1 0.500429 0.248589 0.371581
434 1 0.569422 0.187168 0.371918
467 1 0.556573 0.256336 0.443669
440 1 0.621796 0.191543 0.439243
466 1 0.56167 0.321191 0.371601
469 1 0.619391 0.254139 0.3813
472 1 0.617206 0.319305 0.441901
1031 1 0.186882 0.494529 0.061741
491 1 0.306756 0.372072 0.438997
566 1 0.686129 0.180153 0.496003
22 1 0.688262 0.0552262 0.0010769
494 1 0.431778 0.432305 0.379667
444 1 0.747175 0.189598 0.441543
438 1 0.683001 0.190499 0.379259
442 1 0.811106 0.19328 0.378655
470 1 0.682916 0.313416 0.376147
471 1 0.689524 0.252553 0.440203
474 1 0.810183 0.314009 0.375942
473 1 0.74479 0.254118 0.371719
476 1 0.750133 0.32101 0.434276
475 1 0.804745 0.257389 0.436975
495 1 0.432882 0.370783 0.441763
271 1 0.431187 0.00140101 0.313296
504 1 0.624359 0.441443 0.438097
478 1 0.941797 0.311089 0.382296
477 1 0.871075 0.250154 0.37388
446 1 0.942383 0.188522 0.376064
448 1 0.867556 0.189697 0.434373
479 1 0.927932 0.250075 0.441549
480 1 0.870811 0.312312 0.438882
86 1 0.692368 0.314495 0.0011328
498 1 0.559467 0.443417 0.387234
483 1 0.0591234 0.370582 0.442847
482 1 0.0689969 0.431617 0.373544
481 1 1.00087 0.372138 0.374188
485 1 0.124249 0.371542 0.383229
488 1 0.124029 0.435318 0.441288
484 1 0.00429971 0.429633 0.432952
499 1 0.560862 0.374856 0.44011
492 1 0.248273 0.428716 0.433976
490 1 0.308755 0.4349 0.379553
486 1 0.182055 0.438495 0.380765
496 1 0.372463 0.436933 0.440998
489 1 0.251577 0.368749 0.374303
487 1 0.182598 0.373809 0.439116
15 1 0.441521 0.00147317 0.0667178
135 1 0.183932 0.00458123 0.183454
409 1 0.749784 0.00482904 0.371694
629 1 0.620086 0.377774 0.499989
263 1 0.189697 0.00854906 0.315895
403 1 0.564691 0.00386742 0.43494
1301 1 0.624737 0.497618 0.247322
1051 1 0.815074 0.495941 0.0585251
1183 1 0.942614 0.493347 0.190009
1437 1 0.873693 0.496756 0.380467
558 1 0.438523 0.188 0.491995
399 1 0.438839 0.00248062 0.428746
1295 1 0.431979 0.490014 0.314043
413 1 0.881261 0.00637103 0.369449
411 1 0.810151 0.00101709 0.427187
415 1 0.934498 0.00858642 0.432477
393 1 0.251098 0.00180932 0.374238
1421 1 0.37882 0.496235 0.375681
633 1 0.742603 0.381053 0.496779
638 1 0.933028 0.435659 0.496988
589 1 0.377109 0.248 0.499545
38 1 0.188621 0.185055 0.00308009
597 1 0.620344 0.25089 0.496054
526 1 0.439608 0.060435 0.49546
541 1 0.873042 0.000588261 0.488053
62 1 0.937785 0.191266 -8.12757e-05
554 1 0.312527 0.180663 0.499263
74 1 0.315328 0.313726 0.00174814
1553 1 0.492836 0.500185 0.49711
1565 1 0.869953 0.496883 0.499477
105 1 0.248069 0.374803 0.00509663
617 1 0.246725 0.374332 0.499237
101 1 0.12814 0.369912 0.00644365
534 1 0.685952 0.0641555 0.49222
69 1 0.123353 0.244194 0.00498479
66 1 0.0657592 0.310174 0.00484967
109 1 0.369752 0.374503 0.00114195
110 1 0.436786 0.431199 0.0018401
569 1 0.7482 0.121654 0.496775
598 1 0.686965 0.313427 0.494907
17 1 0.50787 0.00313422 0.00268958
610 1 0.065119 0.443368 0.497963
520 1 0.125186 0.0706043 0.558371
547 1 0.0565099 0.11817 0.555473
642 1 0.0574703 0.0610256 0.627982
677 1 0.118922 0.122526 0.619738
45 1 0.379989 0.124086 0.998426
601 1 0.751147 0.247484 0.500546
643 1 0.0631374 0.00169898 0.680867
524 1 0.253181 0.0669609 0.566908
551 1 0.184153 0.125152 0.566522
555 1 0.321336 0.121295 0.561739
646 1 0.189875 0.0689336 0.625655
650 1 0.318223 0.0639009 0.626983
681 1 0.250707 0.12382 0.627683
550 1 0.186098 0.185115 0.509642
546 1 0.0595141 0.189908 0.504472
528 1 0.380099 0.0646016 0.562027
559 1 0.439209 0.124927 0.56751
654 1 0.442443 0.0669219 0.63034
685 1 0.377556 0.127047 0.62656
532 1 0.506361 0.0601273 0.564655
1823 1 0.935387 0.493694 0.804951
53 1 0.62199 0.120317 0.998577
769 1 0.996598 0.00348329 0.7576
689 1 0.508375 0.116417 0.62737
536 1 0.62979 0.0605656 0.558074
563 1 0.576655 0.122623 0.569284
658 1 0.57257 0.0567327 0.627856
693 1 0.630157 0.127784 0.628333
647 1 0.178687 0.00210334 0.690618
578 1 0.0572231 0.313645 0.503372
655 1 0.438848 0.00799093 0.692649
1797 1 0.126475 0.497276 0.748476
779 1 0.310073 0.000918112 0.809403
1024 1 0.876407 0.436794 0.938386
540 1 0.751753 0.0612679 0.55966
567 1 0.682478 0.127183 0.561016
571 1 0.810198 0.126155 0.56373
662 1 0.686335 0.0620947 0.626038
666 1 0.811973 0.0618575 0.622644
697 1 0.753316 0.128341 0.624643
530 1 0.565794 0.0646924 0.500846
613 1 0.119706 0.381534 0.503935
525 1 0.374178 0.00647992 0.503937
516 1 0.993349 0.0672167 0.559298
673 1 0.99922 0.129199 0.625328
544 1 0.871446 0.0647438 0.567338
575 1 0.927003 0.126327 0.558603
670 1 0.932732 0.069127 0.625242
701 1 0.873759 0.132993 0.622327
1023 1 0.934063 0.373155 0.939559
797 1 0.878004 0.00374646 0.750621
653 1 0.384352 0.00296538 0.626483
552 1 0.118888 0.185472 0.567591
579 1 0.057971 0.248092 0.567962
584 1 0.121027 0.312933 0.567249
674 1 0.0556767 0.189612 0.628365
706 1 0.0616671 0.306549 0.630399
709 1 0.127696 0.250375 0.63026
705 1 0.993576 0.250963 0.626892
580 1 0.995982 0.315451 0.556144
1927 1 0.184452 0.498148 0.937447
1929 1 0.249341 0.499788 0.878058
70 1 0.187679 0.303534 0.999446
556 1 0.250233 0.183485 0.56525
583 1 0.178496 0.244571 0.566172
587 1 0.309209 0.245864 0.560199
588 1 0.245819 0.312674 0.561852
678 1 0.184315 0.188894 0.628478
682 1 0.315286 0.189953 0.626551
710 1 0.185549 0.311803 0.625716
713 1 0.249604 0.248134 0.621371
714 1 0.311369 0.307354 0.62499
515 1 0.0527318 0.0066067 0.563202
789 1 0.623128 0.00284259 0.751456
560 1 0.381588 0.186797 0.564406
591 1 0.436088 0.249694 0.567655
592 1 0.374529 0.307323 0.569046
686 1 0.440543 0.187985 0.625979
717 1 0.371104 0.247882 0.622867
718 1 0.435009 0.318759 0.626636
596 1 0.500932 0.317889 0.564729
721 1 0.507347 0.249942 0.626822
590 1 0.431975 0.312956 0.504896
513 1 0.986227 0.00135943 0.499657
564 1 0.50037 0.184517 0.559952
568 1 0.623744 0.19141 0.561023
595 1 0.558005 0.258156 0.563978
600 1 0.6275 0.315308 0.554655
690 1 0.568841 0.19135 0.627558
722 1 0.562082 0.317339 0.620839
725 1 0.620755 0.256455 0.614446
572 1 0.748541 0.188025 0.561926
599 1 0.695458 0.248043 0.567269
603 1 0.806882 0.258409 0.563183
604 1 0.748931 0.31894 0.560318
694 1 0.69165 0.187577 0.622646
698 1 0.816134 0.19171 0.628371
726 1 0.682247 0.314278 0.618254
729 1 0.753654 0.245991 0.625245
730 1 0.816645 0.315972 0.621804
519 1 0.191131 0.00704395 0.567147
548 1 0.996095 0.183537 0.56411
576 1 0.8692 0.192259 0.55805
607 1 0.93143 0.246052 0.566288
608 1 0.875616 0.317147 0.560966
702 1 0.93502 0.189305 0.630564
733 1 0.874835 0.253184 0.625391
734 1 0.939442 0.311128 0.622011
737 1 0.998412 0.376241 0.621708
611 1 0.0634013 0.372537 0.570381
616 1 0.120037 0.439584 0.569268
738 1 0.0629089 0.432474 0.632418
741 1 0.120486 0.36997 0.626814
903 1 0.18919 0.00129957 0.940827
645 1 0.121096 0.00597375 0.619396
577 1 0.997045 0.247206 0.507639
522 1 0.312138 0.0570277 0.500999
615 1 0.185981 0.375888 0.562465
619 1 0.305454 0.375463 0.566859
620 1 0.246127 0.434996 0.564379
742 1 0.18481 0.4385 0.626544
745 1 0.24559 0.374365 0.621033
746 1 0.308766 0.439669 0.622117
1022 1 0.936061 0.435023 0.874246
1805 1 0.372608 0.493552 0.745462
623 1 0.431157 0.371043 0.563872
624 1 0.370091 0.44099 0.573075
749 1 0.367275 0.376047 0.62305
750 1 0.439135 0.441198 0.625898
628 1 0.488361 0.435443 0.559643
1053 1 0.883943 0.49845 1.00047
753 1 0.494157 0.374518 0.62784
627 1 0.565378 0.381387 0.565638
632 1 0.623627 0.441731 0.565539
754 1 0.55665 0.436367 0.623361
757 1 0.621167 0.377815 0.621838
1021 1 0.877605 0.380547 0.876608
1 1 0.998496 0.00135202 0.99689
1807 1 0.43551 0.499473 0.817448
787 1 0.566196 0.0014426 0.810447
631 1 0.685498 0.382297 0.563649
635 1 0.817588 0.382224 0.566169
636 1 0.751197 0.430018 0.5594
758 1 0.683555 0.435629 0.625129
761 1 0.751291 0.378328 0.625839
762 1 0.815728 0.441062 0.624746
1819 1 0.813472 0.497877 0.814957
565 1 0.62436 0.126094 0.498858
535 1 0.693382 0.00401472 0.564057
925 1 0.868694 0.00218867 0.872716
791 1 0.686319 0.0022474 0.816672
586 1 0.314966 0.308227 0.506553
1925 1 0.128077 0.495482 0.873706
612 1 0.999349 0.444134 0.562789
639 1 0.935554 0.379323 0.559228
640 1 0.877624 0.441201 0.562933
765 1 0.875558 0.379303 0.62732
766 1 0.942569 0.438963 0.628643
657 1 0.505989 0.00198377 0.627938
634 1 0.810338 0.441384 0.502751
667 1 0.810104 0.00255287 0.686965
648 1 0.121376 0.0628482 0.688562
675 1 0.0655549 0.123806 0.686789
770 1 0.0585647 0.0599724 0.746801
776 1 0.125644 0.0645185 0.813674
803 1 0.0568476 0.121633 0.806748
805 1 0.123541 0.118809 0.750518
801 1 0.998195 0.119076 0.749418
644 1 0.997194 0.0631855 0.688282
1019 1 0.812514 0.373419 0.940732
574 1 0.936183 0.190497 0.500377
652 1 0.252799 0.0585181 0.689173
679 1 0.186776 0.12777 0.690356
683 1 0.313834 0.127262 0.692388
774 1 0.196553 0.0596026 0.754073
778 1 0.310388 0.0595287 0.752382
780 1 0.251172 0.0657044 0.815101
807 1 0.192621 0.127129 0.812658
809 1 0.259103 0.12676 0.747674
811 1 0.31685 0.125456 0.814035
73 1 0.24794 0.238944 0.999596
1018 1 0.812369 0.437772 0.872338
1937 1 0.497019 0.500304 0.874532
656 1 0.370277 0.0621427 0.688687
687 1 0.443806 0.130165 0.691103
782 1 0.436994 0.0684486 0.752552
784 1 0.37364 0.0568018 0.812602
813 1 0.37223 0.122661 0.750546
815 1 0.443567 0.125602 0.812693
817 1 0.509286 0.131735 0.750682
660 1 0.504407 0.0671763 0.691923
788 1 0.505749 0.0690591 0.811766
602 1 0.81053 0.318015 0.500788
664 1 0.629329 0.062391 0.687149
691 1 0.57004 0.127902 0.687104
786 1 0.567289 0.0697114 0.749286
792 1 0.627874 0.0669232 0.812329
819 1 0.569975 0.131612 0.80741
821 1 0.635249 0.126631 0.744045
10 1 0.314096 0.0614304 0.999913
18 1 0.564788 0.062994 0.997558
42 1 0.316918 0.182106 0.996217
1014 1 0.687052 0.436206 0.874152
668 1 0.751703 0.0593792 0.685633
695 1 0.695692 0.124375 0.686848
699 1 0.81961 0.122449 0.692351
790 1 0.688285 0.0583026 0.752116
794 1 0.815845 0.0581281 0.749091
796 1 0.752177 0.0599462 0.81058
823 1 0.693258 0.129151 0.809776
825 1 0.75238 0.127031 0.748196
827 1 0.815667 0.12929 0.808686
1020 1 0.754887 0.435272 0.93543
772 1 0.00157227 0.0628751 0.813706
672 1 0.874176 0.0600592 0.685526
703 1 0.935868 0.128141 0.688919
798 1 0.931362 0.0669433 0.743058
800 1 0.874811 0.0611617 0.81361
829 1 0.875342 0.126804 0.749566
831 1 0.935168 0.122638 0.807557
708 1 0.998335 0.310997 0.689763
680 1 0.123948 0.185558 0.694866
707 1 0.0617692 0.249512 0.69406
712 1 0.126697 0.307738 0.687393
802 1 0.0532076 0.180087 0.744665
808 1 0.115426 0.180877 0.807913
834 1 0.0651461 0.312708 0.75687
835 1 0.0571394 0.24408 0.810006
837 1 0.123279 0.249767 0.754425
840 1 0.122443 0.314797 0.81297
676 1 0.998252 0.1826 0.684551
833 1 0.998011 0.251634 0.746012
804 1 0.996743 0.181831 0.808831
684 1 0.253566 0.186645 0.687722
711 1 0.186545 0.246213 0.687522
715 1 0.318272 0.244817 0.691591
716 1 0.245522 0.307422 0.686281
806 1 0.191382 0.186479 0.75581
810 1 0.31047 0.189835 0.754083
812 1 0.251319 0.187675 0.816522
838 1 0.189112 0.308684 0.751531
839 1 0.188981 0.249514 0.817368
841 1 0.244929 0.247783 0.753725
842 1 0.309512 0.308712 0.750459
843 1 0.314627 0.249703 0.819742
844 1 0.251162 0.314827 0.820275
688 1 0.376433 0.188758 0.694159
719 1 0.435507 0.252148 0.687603
720 1 0.368382 0.317976 0.688372
814 1 0.438025 0.186949 0.754533
816 1 0.372739 0.190263 0.815596
845 1 0.375997 0.253561 0.751999
846 1 0.428656 0.315771 0.753473
847 1 0.437217 0.254209 0.814525
848 1 0.372133 0.320942 0.820814
724 1 0.494069 0.31247 0.694204
692 1 0.504511 0.191379 0.684738
820 1 0.50287 0.195348 0.806817
852 1 0.497371 0.31436 0.814941
849 1 0.499599 0.258505 0.750356
696 1 0.632469 0.191226 0.687331
723 1 0.56259 0.252271 0.690137
728 1 0.623953 0.315788 0.68749
818 1 0.567683 0.192233 0.748094
824 1 0.631272 0.191414 0.804423
850 1 0.564969 0.31236 0.752037
851 1 0.571547 0.250734 0.808402
853 1 0.632935 0.250591 0.746953
856 1 0.625691 0.316253 0.819004
700 1 0.748867 0.191045 0.688423
727 1 0.687597 0.251279 0.680104
731 1 0.809216 0.262585 0.680754
732 1 0.747824 0.314558 0.678885
822 1 0.694207 0.19112 0.74964
826 1 0.812561 0.190301 0.748417
828 1 0.753131 0.182765 0.815847
854 1 0.688128 0.319234 0.74171
855 1 0.694116 0.254448 0.807262
857 1 0.754036 0.254302 0.745965
858 1 0.812543 0.316095 0.748543
859 1 0.813619 0.247056 0.811696
860 1 0.74749 0.317667 0.810816
836 1 0.99906 0.31283 0.812554
704 1 0.876322 0.189576 0.690147
735 1 0.929713 0.254238 0.686868
736 1 0.876934 0.316214 0.681452
830 1 0.937524 0.188712 0.743763
832 1 0.880407 0.19131 0.804116
861 1 0.870123 0.259508 0.74809
862 1 0.936447 0.312515 0.750339
863 1 0.938165 0.250637 0.811913
864 1 0.864934 0.320308 0.817561
739 1 0.0570503 0.368237 0.688222
744 1 0.12426 0.435009 0.686545
866 1 0.0585756 0.43933 0.753935
867 1 0.0596521 0.369652 0.814543
869 1 0.126606 0.37115 0.745877
872 1 0.124357 0.439793 0.811497
740 1 0.998302 0.433522 0.689134
1017 1 0.746583 0.37677 0.876416
743 1 0.186432 0.367451 0.685654
747 1 0.302343 0.370798 0.687235
748 1 0.240563 0.434658 0.685009
870 1 0.184021 0.435746 0.752777
871 1 0.188716 0.378475 0.815328
873 1 0.250725 0.377943 0.752413
874 1 0.313285 0.438057 0.752971
875 1 0.313344 0.375042 0.811227
876 1 0.248372 0.434927 0.817091
1015 1 0.680835 0.380678 0.942692
751 1 0.436346 0.376791 0.692397
752 1 0.37178 0.431113 0.681631
877 1 0.3712 0.377657 0.751739
878 1 0.437545 0.436091 0.751405
879 1 0.436385 0.374948 0.817143
880 1 0.372751 0.439974 0.81314
585 1 0.243302 0.25146 0.506614
1689 1 0.749249 0.498716 0.622904
756 1 0.497784 0.443754 0.686831
881 1 0.497491 0.371725 0.754308
884 1 0.495758 0.434694 0.818051
755 1 0.559462 0.371557 0.687543
760 1 0.617415 0.438548 0.685431
882 1 0.553912 0.434272 0.748652
883 1 0.560474 0.377447 0.809134
885 1 0.62408 0.380065 0.747669
888 1 0.618865 0.438509 0.810582
783 1 0.443475 0.0042499 0.809099
1009 1 0.508501 0.375943 0.880661
26 1 0.811127 0.0668724 0.999717
1005 1 0.379223 0.37866 0.879385
1951 1 0.941628 0.49889 0.937393
759 1 0.685386 0.373227 0.681992
763 1 0.812283 0.370613 0.681664
764 1 0.752035 0.438224 0.68754
886 1 0.686535 0.433027 0.750146
887 1 0.688441 0.371623 0.813442
889 1 0.75318 0.375955 0.742591
890 1 0.807902 0.435394 0.744775
891 1 0.806473 0.379192 0.805098
892 1 0.748046 0.44164 0.810037
1012 1 0.50531 0.44283 0.935158
1013 1 0.621283 0.378741 0.883001
1007 1 0.443178 0.370994 0.932672
1008 1 0.377885 0.437524 0.938697
905 1 0.250974 0.00712192 0.882966
1016 1 0.627842 0.44423 0.935657
868 1 0.998379 0.438789 0.811562
865 1 0.995969 0.37746 0.752618
767 1 0.938172 0.373189 0.684463
768 1 0.873799 0.437752 0.688222
893 1 0.871803 0.375221 0.745192
894 1 0.934231 0.430274 0.745564
895 1 0.934894 0.371605 0.814784
896 1 0.872806 0.438239 0.809972
898 1 0.059314 0.0559641 0.870302
904 1 0.118806 0.056184 0.92497
931 1 0.0577219 0.127744 0.931806
933 1 0.12878 0.129016 0.873008
900 1 0.995081 0.0610092 0.935102
1010 1 0.563357 0.44649 0.875159
14 1 0.448034 0.0562032 0.995371
902 1 0.18752 0.0659086 0.876891
906 1 0.313989 0.0619386 0.874938
908 1 0.252403 0.064856 0.936938
935 1 0.183182 0.127741 0.93755
937 1 0.251858 0.121224 0.877638
939 1 0.316647 0.120508 0.936893
999 1 0.184838 0.373877 0.941261
910 1 0.444102 0.0641176 0.877228
912 1 0.382203 0.0623856 0.942491
941 1 0.378455 0.113157 0.87518
943 1 0.442302 0.125285 0.931563
916 1 0.504645 0.0680044 0.934609
1001 1 0.248298 0.370516 0.879606
1011 1 0.565893 0.364554 0.940366
1799 1 0.196014 0.499245 0.812783
945 1 0.506345 0.130053 0.868849
914 1 0.567723 0.0571721 0.872482
920 1 0.628285 0.0598602 0.934543
947 1 0.561964 0.125569 0.934106
949 1 0.626757 0.132976 0.872114
785 1 0.502308 0.0050229 0.746361
1004 1 0.247708 0.440161 0.942388
593 1 0.492746 0.253124 0.506477
1801 1 0.251333 0.49929 0.752557
918 1 0.68539 0.0683901 0.872479
922 1 0.808938 0.0635461 0.871989
924 1 0.749387 0.0595142 0.933216
951 1 0.687015 0.119849 0.939027
953 1 0.752364 0.120353 0.870707
955 1 0.811941 0.119676 0.937505
1949 1 0.878672 0.497592 0.87327
49 1 0.501753 0.128205 0.998451
929 1 0.994933 0.125335 0.873933
926 1 0.933668 0.0689229 0.876459
928 1 0.866362 0.0585469 0.935161
957 1 0.871682 0.1244 0.869644
959 1 0.935549 0.123129 0.93703
1002 1 0.313574 0.441181 0.877888
781 1 0.376911 0.00240627 0.74927
649 1 0.248663 0.00144584 0.62617
930 1 0.05638 0.1874 0.871875
936 1 0.126354 0.186352 0.936132
962 1 0.0578799 0.312 0.873109
963 1 0.057925 0.245063 0.937172
965 1 0.117303 0.244876 0.867694
968 1 0.122745 0.302772 0.935438
964 1 1.00051 0.314202 0.944007
932 1 0.993559 0.187081 0.931085
659 1 0.569028 0.000124616 0.684965
934 1 0.183372 0.186002 0.872346
938 1 0.310629 0.181236 0.876748
940 1 0.251273 0.179872 0.934745
966 1 0.179972 0.312684 0.876167
967 1 0.183865 0.243603 0.938139
969 1 0.247652 0.251958 0.870973
970 1 0.31435 0.311254 0.879434
971 1 0.305946 0.249919 0.940121
972 1 0.247642 0.309331 0.938686
998 1 0.184845 0.434236 0.882154
1923 1 0.0673192 0.49395 0.931305
527 1 0.43842 0.000458904 0.563819
1003 1 0.311496 0.374626 0.936806
973 1 0.377333 0.243859 0.879857
944 1 0.378287 0.177755 0.93228
942 1 0.442309 0.192097 0.868704
976 1 0.385225 0.304404 0.931262
975 1 0.441256 0.243927 0.935492
974 1 0.443945 0.307229 0.872231
980 1 0.501058 0.30492 0.931777
948 1 0.501854 0.192128 0.934044
977 1 0.507237 0.253111 0.871217
981 1 0.632015 0.244437 0.87273
984 1 0.630479 0.311447 0.945009
946 1 0.567078 0.193001 0.871642
952 1 0.624502 0.184196 0.939059
978 1 0.563435 0.311647 0.875092
979 1 0.566156 0.253432 0.940571
77 1 0.377372 0.244173 0.996171
671 1 0.935315 3.52877e-05 0.684468
987 1 0.815854 0.245026 0.933979
983 1 0.684193 0.245514 0.946048
950 1 0.692891 0.192255 0.873495
954 1 0.812938 0.185557 0.875222
986 1 0.806506 0.315984 0.877275
956 1 0.74534 0.186348 0.93367
985 1 0.754745 0.250218 0.874794
982 1 0.685186 0.30248 0.882019
988 1 0.746194 0.312182 0.940355
41 1 0.253914 0.118808 0.997441
961 1 0.00204446 0.250836 0.8765
989 1 0.87362 0.245325 0.871144
991 1 0.933099 0.248402 0.939935
990 1 0.935166 0.305376 0.874784
992 1 0.871588 0.307032 0.930958
958 1 0.937736 0.190057 0.877794
960 1 0.879084 0.187914 0.936852
997 1 0.123654 0.371409 0.878028
996 1 0.993319 0.432556 0.93869
993 1 0.99184 0.366254 0.875052
994 1 0.0583989 0.436693 0.870769
1006 1 0.436392 0.438028 0.882038
1000 1 0.127321 0.434446 0.945932
995 1 0.0594105 0.37621 0.937372
621 1 0.364956 0.378874 0.507477
1557 1 0.624868 0.500453 0.500626
89 1 0.754188 0.247695 0.99778
651 1 0.31471 -0.000347492 0.688788
6 1 0.180997 0.0595844 1.00043
921 1 0.748036 0.000345972 0.873313
1675 1 0.31378 0.494558 0.685218
799 1 0.937395 0.00227704 0.819489
907 1 0.312091 0.00295784 0.939145
518 1 0.187274 0.0651571 0.502362
78 1 0.441991 0.310239 0.9961
37 1 0.116847 0.12437 0.997149
545 1 0.991105 0.130721 0.499805
1695 1 0.942046 0.497494 0.694866
1933 1 0.377724 0.496674 0.873391
553 1 0.250042 0.117386 0.507995
1671 1 0.183771 0.498101 0.692964
1667 1 0.0647027 0.499743 0.690224
1539 1 0.0616352 0.500763 0.562968
543 1 0.929522 0.00266404 0.559798
90 1 0.81352 0.309672 0.996517
641 1 0.990733 0.00498051 0.620237
625 1 0.503032 0.37888 0.501851
562 1 0.564721 0.188246 0.504981
114 1 0.562286 0.437434 0.99747
581 1 0.121958 0.246211 0.507213
113 1 0.500623 0.376747 0.994509
58 1 0.812731 0.183583 0.998223
1921 1 0.998664 0.497611 0.870573
1681 1 0.499554 0.494687 0.621464
773 1 0.116598 0.00396773 0.75859
1947 1 0.812495 0.495882 0.930357
50 1 0.560539 0.184908 0.994622
34 1 0.0558814 0.183719 0.996104
927 1 0.929504 0.00259968 0.938945
665 1 0.755787 0.00227548 0.621037
1543 1 0.179792 0.493153 0.562505
1563 1 0.819581 0.497616 0.564703
1931 1 0.314658 0.49425 0.935986
1691 1 0.812024 0.498443 0.685508
1673 1 0.249127 0.498815 0.624085
1809 1 0.494321 0.493062 0.756835
561 1 0.498086 0.11756 0.501712
13 1 0.376161 0.000512984 0.993333
46 1 0.440039 0.189841 0.99929
121 1 0.748578 0.380626 0.998814
1033 1 0.255967 0.49672 0.997712
81 1 0.500267 0.250994 0.997282
98 1 0.0632662 0.430235 0.997043
570 1 0.807707 0.181257 0.504211
65 1 0.996846 0.253154 0.997871
549 1 0.122153 0.131417 0.503273
622 1 0.429722 0.445865 0.505816
54 1 0.68484 0.182137 0.996281
125 1 0.869505 0.368021 0.998799
57 1 0.746252 0.122577 0.994974
1025 1 1.00124 0.49477 0.999132
609 1 1.00023 0.377898 0.504145
61 1 0.876733 0.119342 0.998313
2 1 0.063347 0.0653072 0.994607
618 1 0.308196 0.436737 0.503819
594 1 0.564315 0.31712 0.506298
106 1 0.310291 0.435336 0.999122
606 1 0.931956 0.305477 0.505442
537 1 0.743866 0.00823644 0.501148
33 1 0.995258 0.123871 0.995728
557 1 0.3803 0.127455 0.501511
626 1 0.560295 0.442108 0.502618
1032 1 0.124763 0.559028 0.0578975
1059 1 0.0630623 0.624248 0.0610756
1154 1 0.068717 0.558093 0.124221
1189 1 0.123895 0.623858 0.122494
1537 1 0.993504 0.500877 0.495849
1533 1 0.873876 0.877895 0.372586
1036 1 0.255736 0.556251 0.0605096
1063 1 0.19107 0.617858 0.0609451
1067 1 0.314176 0.622124 0.0595797
1158 1 0.187372 0.555796 0.119325
1162 1 0.317194 0.560215 0.127185
1193 1 0.253207 0.617866 0.127388
1534 1 0.943327 0.94002 0.374678
407 1 0.694558 0.998608 0.43347
1040 1 0.37427 0.563052 0.0560735
1071 1 0.432908 0.627312 0.0581489
1166 1 0.433096 0.564607 0.123359
1197 1 0.374439 0.627356 0.120004
1044 1 0.494067 0.561921 0.0521691
1125 1 0.124309 0.876211 6.64783e-05
1039 1 0.434732 0.503491 0.064942
1427 1 0.566762 0.504909 0.440626
1201 1 0.496927 0.623337 0.11792
1048 1 0.625011 0.563178 0.0614762
1075 1 0.560721 0.618257 0.0611283
1170 1 0.557725 0.558994 0.120753
1205 1 0.624376 0.618986 0.124005
1415 1 0.183011 0.503743 0.435126
1598 1 0.93666 0.690868 0.500075
1102 1 0.437067 0.820803 7.10059e-05
1090 1 0.0711175 0.818797 0.00372845
1052 1 0.75773 0.558045 0.06573
1079 1 0.698613 0.621309 0.0610175
1083 1 0.811946 0.622842 0.0636236
1174 1 0.685062 0.563996 0.129781
1178 1 0.815522 0.550836 0.128244
1209 1 0.749228 0.618198 0.126995
1175 1 0.683829 0.502758 0.1869
157 1 0.874523 0.999901 0.122964
1155 1 0.0675237 0.500043 0.190796
1185 1 0.00847947 0.617397 0.123652
1056 1 0.878015 0.562736 0.07042
1087 1 0.945817 0.619246 0.0606122
1182 1 0.941771 0.5642 0.127638
1213 1 0.875889 0.625678 0.125615
1028 1 0.00453041 0.56067 0.0601073
1626 1 0.817073 0.811582 0.494428
1297 1 0.498005 0.504056 0.247225
1171 1 0.565411 0.501984 0.182674
1064 1 0.122638 0.690413 0.0636772
1091 1 0.06273 0.752226 0.0609025
1096 1 0.137087 0.814072 0.0623623
1186 1 0.0605693 0.679274 0.127177
1218 1 0.0605138 0.81313 0.12407
1221 1 0.1268 0.752125 0.118666
1092 1 0.99516 0.816038 0.0666903
1299 1 0.564031 0.503806 0.306888
1068 1 0.25701 0.681967 0.0562504
1095 1 0.194916 0.750677 0.0636509
1099 1 0.314372 0.757104 0.0603075
1100 1 0.246903 0.820885 0.0668982
1190 1 0.192391 0.677272 0.115766
1194 1 0.315662 0.689881 0.127157
1222 1 0.189436 0.821751 0.12816
1225 1 0.254522 0.750478 0.119949
1226 1 0.321507 0.814946 0.121185
1307 1 0.807777 0.505585 0.314437
1303 1 0.684161 0.501582 0.32147
1072 1 0.371266 0.690961 0.0640813
1103 1 0.438701 0.752879 0.0632551
1104 1 0.377778 0.813003 0.0639307
1198 1 0.435183 0.68641 0.119374
1229 1 0.382519 0.751252 0.125247
1230 1 0.447063 0.80739 0.128476
1233 1 0.495861 0.744532 0.126569
1108 1 0.505753 0.813954 0.0639103
1076 1 0.501515 0.686959 0.0606644
31 1 0.942346 1.0001 0.0638014
1605 1 0.122306 0.745845 0.493294
1618 1 0.562678 0.810423 0.500604
1080 1 0.622063 0.681989 0.0594499
1107 1 0.567519 0.747442 0.0602581
1112 1 0.632033 0.809557 0.0561047
1202 1 0.558787 0.686897 0.12634
1234 1 0.568117 0.804033 0.125456
1237 1 0.627603 0.744371 0.119619
1084 1 0.755605 0.681049 0.0664197
1111 1 0.695802 0.743128 0.0632677
1115 1 0.809683 0.74876 0.0744127
1116 1 0.75472 0.812657 0.0571548
1206 1 0.692309 0.678065 0.126214
1210 1 0.814427 0.68447 0.130132
1238 1 0.69626 0.805441 0.123349
1241 1 0.754174 0.740904 0.134051
1242 1 0.815352 0.810193 0.129087
1217 1 0.00182676 0.74627 0.122835
1060 1 0.00825647 0.686306 0.0589835
1088 1 0.872368 0.694527 0.0684362
1119 1 0.932836 0.751834 0.064045
1120 1 0.87513 0.817449 0.0624193
1214 1 0.937675 0.691195 0.12231
1245 1 0.874782 0.757001 0.126349
1246 1 0.940427 0.809031 0.12464
1078 1 0.696682 0.681427 0.0019747
1285 1 0.12524 0.506434 0.249806
1123 1 0.0601477 0.869856 0.0670597
1128 1 0.128462 0.938842 0.061511
1250 1 0.0615206 0.936321 0.126462
1253 1 0.125964 0.8762 0.117577
1535 1 0.936076 0.875691 0.434974
1249 1 0.999151 0.876678 0.129131
1124 1 0.999694 0.934012 0.0667044
1578 1 0.30736 0.688238 0.499411
1536 1 0.874007 0.942461 0.426631
1429 1 0.620837 0.502484 0.378893
133 1 0.119206 0.997812 0.125345
1127 1 0.190303 0.878163 0.0626604
1131 1 0.313932 0.879891 0.0586566
1132 1 0.247556 0.93798 0.0665568
1254 1 0.183152 0.931845 0.126616
1257 1 0.250122 0.878208 0.127175
1258 1 0.31268 0.937643 0.129614
1609 1 0.247714 0.754756 0.497572
3 1 0.0548468 0.995601 0.0589671
1135 1 0.440785 0.881139 0.0628519
1136 1 0.380137 0.943679 0.0589551
1261 1 0.376143 0.873645 0.127312
1262 1 0.439502 0.939311 0.127442
1505 1 0.996313 0.871137 0.373758
1646 1 0.428454 0.936589 0.50134
1140 1 0.508552 0.935835 0.0667814
1139 1 0.565322 0.87345 0.0592751
1144 1 0.626403 0.933606 0.0602633
1266 1 0.565337 0.950207 0.131114
1269 1 0.630752 0.871362 0.126146
1265 1 0.504943 0.871541 0.1293
1529 1 0.757047 0.867918 0.379978
1527 1 0.687617 0.87745 0.439238
1143 1 0.691488 0.873126 0.0610957
1147 1 0.820004 0.875539 0.0654663
1148 1 0.753854 0.934781 0.0620016
1270 1 0.688298 0.928766 0.124194
1273 1 0.756851 0.882955 0.120283
1274 1 0.805833 0.947659 0.122333
1641 1 0.244232 0.873646 0.496168
1411 1 0.0642013 0.503386 0.435587
1526 1 0.692191 0.933801 0.377296
1151 1 0.936774 0.876148 0.067498
1152 1 0.877006 0.942479 0.060923
1277 1 0.878767 0.873481 0.132128
1278 1 0.935274 0.935274 0.127251
1546 1 0.312669 0.562669 0.495709
1122 1 0.0584717 0.936749 -0.000910089
1094 1 0.194219 0.811033 0.00251646
1530 1 0.809069 0.941631 0.370993
1061 1 0.12856 0.626599 0.00188494
1160 1 0.128015 0.56252 0.183756
1187 1 0.0708956 0.622734 0.192958
1282 1 0.0622737 0.561234 0.246432
1288 1 0.132096 0.561106 0.313407
1315 1 0.0664723 0.619928 0.30467
1317 1 0.133586 0.624499 0.248587
1531 1 0.820043 0.87898 0.431476
1164 1 0.250848 0.558375 0.19288
1191 1 0.184042 0.625851 0.185478
1195 1 0.317947 0.620037 0.186049
1286 1 0.189587 0.560332 0.252886
1290 1 0.311574 0.565941 0.255108
1292 1 0.255381 0.563464 0.321946
1319 1 0.193432 0.626087 0.315285
1321 1 0.245388 0.626968 0.246121
1323 1 0.317251 0.629104 0.313891
1532 1 0.754556 0.930936 0.433212
1524 1 0.497276 0.940948 0.434613
1168 1 0.36496 0.549626 0.197565
1199 1 0.433622 0.623486 0.189564
1294 1 0.428875 0.561369 0.248013
1296 1 0.378788 0.560223 0.318358
1325 1 0.375082 0.62371 0.253748
1327 1 0.441542 0.620991 0.319252
1521 1 0.496836 0.875801 0.375411
1523 1 0.564516 0.874441 0.437169
1300 1 0.496944 0.562406 0.311646
1329 1 0.492081 0.626328 0.253724
1172 1 0.498874 0.558502 0.182939
1176 1 0.620456 0.561677 0.18506
1203 1 0.555848 0.622304 0.18418
1298 1 0.559078 0.561744 0.248234
1304 1 0.621479 0.56147 0.311372
1331 1 0.556836 0.623556 0.309609
1333 1 0.623548 0.620796 0.250636
1130 1 0.318232 0.937823 -0.000834063
277 1 0.628047 0.997973 0.252207
1525 1 0.624341 0.873206 0.369869
1180 1 0.746066 0.557118 0.192857
1207 1 0.688687 0.621524 0.192681
1211 1 0.813674 0.616347 0.188243
1302 1 0.683683 0.557667 0.255255
1306 1 0.819384 0.561351 0.252836
1308 1 0.746858 0.564623 0.309673
1335 1 0.68227 0.629462 0.308238
1337 1 0.747505 0.626732 0.254162
1339 1 0.814948 0.622867 0.313757
1522 1 0.566979 0.929757 0.377523
1284 1 0.00543869 0.558811 0.315912
1156 1 0.000836113 0.561586 0.192946
1313 1 0.00124691 0.628769 0.249205
1184 1 0.875114 0.560325 0.186511
1215 1 0.944853 0.630253 0.182104
1310 1 0.949425 0.564347 0.255975
1312 1 0.877188 0.56131 0.31507
1341 1 0.88262 0.621328 0.246288
1343 1 0.945237 0.627171 0.313598
385 1 0.003555 0.998969 0.382298
1192 1 0.130467 0.688962 0.182848
1219 1 0.0692621 0.760361 0.18349
1224 1 0.126303 0.819694 0.185609
1314 1 0.070349 0.69364 0.250085
1320 1 0.12569 0.685083 0.318422
1346 1 0.0655339 0.810807 0.251034
1347 1 0.0577804 0.752985 0.311149
1349 1 0.129616 0.756587 0.252286
1352 1 0.122927 0.814453 0.316105
1348 1 0.998973 0.811443 0.307909
1196 1 0.244661 0.690624 0.185864
1223 1 0.193144 0.750463 0.17907
1227 1 0.308916 0.757024 0.193171
1228 1 0.250889 0.817915 0.201015
1318 1 0.185081 0.691589 0.251747
1322 1 0.310973 0.687683 0.251235
1324 1 0.255282 0.690311 0.318357
1350 1 0.19149 0.817072 0.253227
1351 1 0.194918 0.753168 0.316294
1353 1 0.249895 0.754362 0.251547
1354 1 0.31112 0.813839 0.253997
1355 1 0.311171 0.749751 0.315019
1356 1 0.246943 0.819847 0.317158
1200 1 0.374913 0.683564 0.186104
1231 1 0.439107 0.745424 0.191044
1232 1 0.381243 0.808686 0.192237
1326 1 0.433454 0.683309 0.25334
1328 1 0.374773 0.69076 0.32095
1357 1 0.372733 0.746068 0.243528
1358 1 0.442385 0.815131 0.253625
1359 1 0.436942 0.749 0.315935
1360 1 0.379961 0.81101 0.305216
1361 1 0.498246 0.744948 0.255669
1364 1 0.502441 0.810303 0.313412
1204 1 0.495866 0.682905 0.186606
1236 1 0.50172 0.804767 0.191509
1332 1 0.496834 0.684205 0.315786
1208 1 0.620564 0.674967 0.183189
1235 1 0.565268 0.750519 0.194252
1240 1 0.628577 0.814587 0.190441
1330 1 0.567253 0.686482 0.247729
1336 1 0.624868 0.685055 0.313437
1362 1 0.562772 0.808853 0.251019
1363 1 0.564404 0.749782 0.319254
1365 1 0.625531 0.750084 0.252922
1368 1 0.625116 0.807799 0.311826
1212 1 0.754505 0.677408 0.190428
1239 1 0.684134 0.748536 0.19166
1243 1 0.817668 0.741442 0.188183
1244 1 0.751056 0.810773 0.184556
1334 1 0.683589 0.685023 0.248662
1338 1 0.813805 0.680528 0.250471
1340 1 0.749957 0.686875 0.317404
1366 1 0.686589 0.807882 0.249227
1367 1 0.692086 0.744115 0.313913
1369 1 0.750216 0.744489 0.246158
1370 1 0.81926 0.80731 0.248109
1371 1 0.816323 0.748249 0.311801
1372 1 0.754728 0.812815 0.308175
1316 1 0.00118181 0.689095 0.311934
1188 1 0.00164202 0.694038 0.18683
1345 1 0.998497 0.756107 0.244475
1220 1 0.00161086 0.812614 0.186984
1216 1 0.876498 0.683419 0.191402
1247 1 0.934132 0.748465 0.183314
1248 1 0.876542 0.809873 0.189876
1342 1 0.936543 0.687931 0.254227
1344 1 0.87677 0.683585 0.309597
1373 1 0.876287 0.742255 0.251262
1374 1 0.933791 0.8134 0.250418
1375 1 0.938743 0.7489 0.309462
1376 1 0.872251 0.810236 0.313083
1602 1 0.0597055 0.803139 0.498913
1251 1 0.0591529 0.876556 0.18883
1256 1 0.120808 0.938654 0.191983
1378 1 0.0554799 0.937228 0.249328
1379 1 0.0615148 0.871559 0.310681
1381 1 0.122243 0.872608 0.252392
1384 1 0.12224 0.942778 0.320088
1377 1 -0.000710476 0.876474 0.252301
1528 1 0.628631 0.932009 0.434328
1433 1 0.750516 0.507214 0.378475
1086 1 0.939323 0.691231 0.00607765
387 1 0.0588285 1.00051 0.445282
1255 1 0.183695 0.878229 0.197145
1259 1 0.315557 0.878046 0.190496
1260 1 0.256709 0.93979 0.18873
1382 1 0.188678 0.940379 0.255012
1383 1 0.178887 0.884577 0.320128
1385 1 0.255737 0.880023 0.262321
1386 1 0.31054 0.943358 0.250255
1387 1 0.318611 0.881665 0.319309
1388 1 0.242698 0.944257 0.315937
1431 1 0.699052 0.50254 0.440204
7 1 0.182374 0.997281 0.064009
1263 1 0.447041 0.865525 0.188108
1264 1 0.375038 0.93582 0.188888
1389 1 0.373245 0.874748 0.255222
1390 1 0.433909 0.938152 0.25663
1391 1 0.439097 0.872793 0.314989
1392 1 0.375967 0.940012 0.315268
1396 1 0.498683 0.938132 0.315161
1393 1 0.504613 0.881778 0.251632
1268 1 0.495954 0.94567 0.195052
1267 1 0.56636 0.868823 0.189639
1272 1 0.625909 0.942341 0.179137
1394 1 0.562401 0.940784 0.246119
1395 1 0.561184 0.872256 0.314745
1397 1 0.623413 0.878316 0.251036
1400 1 0.625146 0.934216 0.30945
1606 1 0.184621 0.820448 0.497782
1093 1 0.134655 0.749581 0.00247238
1520 1 0.371441 0.939053 0.439985
1305 1 0.749868 0.505347 0.256502
1271 1 0.688261 0.875642 0.191376
1275 1 0.813334 0.873836 0.187012
1276 1 0.752056 0.944015 0.18726
1398 1 0.688928 0.940532 0.24483
1399 1 0.693375 0.873448 0.309041
1401 1 0.752155 0.871917 0.248315
1402 1 0.809452 0.935057 0.249844
1403 1 0.807549 0.88091 0.315837
1404 1 0.747522 0.940532 0.313098
1519 1 0.436905 0.87876 0.436279
1027 1 0.0596578 0.50045 0.0642878
401 1 0.504519 0.997917 0.37127
1517 1 0.378441 0.879468 0.378184
1380 1 0.995779 0.932852 0.314397
1252 1 0.00391275 0.94225 0.185948
1279 1 0.93787 0.876155 0.188432
1280 1 0.876375 0.944051 0.186678
1405 1 0.870265 0.875113 0.247041
1406 1 0.942515 0.938535 0.24747
1407 1 0.936242 0.871052 0.308461
1408 1 0.882092 0.939144 0.308612
137 1 0.247747 0.999774 0.119761
1650 1 0.564768 0.937474 0.493087
405 1 0.626461 0.994029 0.376334
1410 1 0.0730154 0.561224 0.372296
1416 1 0.125648 0.571569 0.444785
1443 1 0.0602301 0.625921 0.443286
1445 1 0.127763 0.624456 0.375467
1441 1 0.0031515 0.619382 0.379682
1661 1 0.876482 0.876482 0.493352
1169 1 0.501472 0.503379 0.119534
143 1 0.428534 0.999497 0.18778
1518 1 0.438182 0.937652 0.373351
265 1 0.25165 0.998957 0.245484
1594 1 0.816681 0.692508 0.495505
1420 1 0.251052 0.559848 0.437037
1451 1 0.305852 0.626595 0.436357
1449 1 0.251687 0.623715 0.374336
1447 1 0.195533 0.625584 0.436573
1414 1 0.186688 0.561885 0.370543
1418 1 0.314374 0.558826 0.377884
1173 1 0.621289 0.503645 0.119628
1424 1 0.371847 0.563378 0.436078
1453 1 0.367691 0.627711 0.376032
1455 1 0.432839 0.628824 0.439227
1422 1 0.435047 0.564827 0.38061
1457 1 0.504035 0.627279 0.383602
1428 1 0.494439 0.559508 0.445103
1461 1 0.623443 0.624901 0.379498
1426 1 0.560411 0.565577 0.381297
1432 1 0.629013 0.573157 0.447468
1459 1 0.564316 0.633665 0.442403
1516 1 0.24977 0.934044 0.441088
1283 1 0.0701835 0.501507 0.312542
1511 1 0.181432 0.88172 0.433871
1510 1 0.184216 0.946024 0.378313
1463 1 0.689394 0.629169 0.443956
1436 1 0.756387 0.576387 0.439126
1434 1 0.813118 0.566051 0.37541
1465 1 0.751692 0.628905 0.373997
1467 1 0.822515 0.631347 0.44625
1430 1 0.68692 0.565347 0.383982
1509 1 0.121722 0.87423 0.377407
1508 1 0.994795 0.936201 0.435804
273 1 0.495731 0.998872 0.255956
1514 1 0.308 0.939434 0.379634
1412 1 0.00431126 0.560067 0.434391
1440 1 0.87834 0.5579 0.440436
1471 1 0.936715 0.62297 0.438065
1438 1 0.937481 0.558992 0.375783
1469 1 0.878632 0.625402 0.376089
1442 1 0.0605257 0.684716 0.384094
1480 1 0.123399 0.812388 0.430658
1474 1 0.0558001 0.811781 0.373283
1477 1 0.134584 0.748889 0.367449
1475 1 0.0561127 0.752573 0.431897
1448 1 0.132847 0.683126 0.439598
1444 1 0.996611 0.687942 0.435605
1473 1 0.99729 0.745763 0.371299
275 1 0.569037 0.991951 0.31561
1513 1 0.24958 0.876561 0.38113
1483 1 0.312558 0.756216 0.439603
1481 1 0.250502 0.752251 0.37653
1452 1 0.24849 0.689425 0.434589
1479 1 0.187518 0.753122 0.43323
1450 1 0.307075 0.694589 0.379553
1478 1 0.188386 0.814664 0.379404
1484 1 0.25201 0.819552 0.435113
1482 1 0.317825 0.815788 0.373938
1446 1 0.188716 0.68399 0.372682
1633 1 0.995235 0.878524 0.490914
1456 1 0.368445 0.684126 0.43727
1485 1 0.380653 0.750223 0.376339
1486 1 0.448322 0.811442 0.377507
1488 1 0.380119 0.811061 0.437389
1454 1 0.439223 0.687425 0.380868
1487 1 0.432218 0.745642 0.446441
1142 1 0.685069 0.937132 0.00522282
1492 1 0.506335 0.807065 0.437311
1489 1 0.506202 0.747554 0.377704
1460 1 0.498773 0.692466 0.442697
1490 1 0.564519 0.807863 0.375991
1491 1 0.567582 0.74809 0.43406
1493 1 0.63161 0.750299 0.375741
1458 1 0.569842 0.68809 0.376127
1496 1 0.623851 0.812559 0.435683
1464 1 0.629712 0.685291 0.441805
1630 1 0.941135 0.818677 0.49898
1466 1 0.81656 0.691001 0.377325
1499 1 0.821644 0.74946 0.435557
1498 1 0.818925 0.808116 0.374317
1495 1 0.688477 0.755727 0.4374
1462 1 0.688646 0.683057 0.375656
1468 1 0.754519 0.690036 0.432757
1500 1 0.747823 0.815819 0.439698
1494 1 0.681471 0.809215 0.365814
1497 1 0.749424 0.759079 0.377215
1047 1 0.686499 0.50171 0.0689215
1502 1 0.939871 0.809759 0.371768
1503 1 0.939591 0.753252 0.43906
1501 1 0.880715 0.751082 0.367451
1504 1 0.878373 0.811902 0.434662
1476 1 0.997026 0.814228 0.433544
1470 1 0.940484 0.68428 0.376202
1472 1 0.88034 0.694274 0.432427
1515 1 0.315937 0.877674 0.439905
1043 1 0.559443 0.501291 0.055496
1512 1 0.125146 0.944522 0.441238
1506 1 0.0638242 0.935883 0.384404
1507 1 0.0599346 0.87414 0.438697
1309 1 0.878074 0.50496 0.250884
1654 1 0.685767 0.940823 0.49921
27 1 0.812924 1.0004 0.0607543
1114 1 0.816863 0.809935 0.00273596
23 1 0.697727 0.997097 0.0664149
1179 1 0.815674 0.501051 0.191694
1035 1 0.317653 0.502863 0.0652522
1435 1 0.811767 0.5024 0.43482
1569 1 0.99705 0.620122 0.498324
1658 1 0.816041 0.942391 0.49143
1653 1 0.624777 0.871506 0.497777
25 1 0.749438 0.998525 0.00375035
1149 1 0.875749 0.872695 0.00526139
1550 1 0.429165 0.561077 0.499175
1073 1 0.49478 0.623684 0.00144146
1570 1 0.0674568 0.692654 0.494707
1089 1 0.000901557 0.756791 0.00385585
1082 1 0.817276 0.684546 -4.25786e-05
1118 1 0.937315 0.810903 0.00334203
1045 1 0.625231 0.503871 0.00361404
1074 1 0.560691 0.679131 0.00440291
1105 1 0.503377 0.756969 0.00249436
1054 1 0.945848 0.560323 0.00429125
1581 1 0.36808 0.624006 0.498876
1113 1 0.757492 0.750338 0.000735515
1649 1 0.501238 0.879541 0.496461
1109 1 0.634666 0.741575 0.00154495
1133 1 0.374713 0.877479 0.00250637
1134 1 0.446703 0.943697 0.00261958
1117 1 0.871067 0.749244 0.00756217
1145 1 0.75349 0.879748 0.00114931
1662 1 0.933626 0.942087 0.493383
1541 1 0.120378 0.506424 0.499098
1030 1 0.185186 0.559073 0.00528178
1058 1 0.0672243 0.686303 0.0024257
1544 1 0.12282 0.563367 0.564593
1571 1 0.0632208 0.629498 0.554228
1666 1 0.0620014 0.571073 0.626496
1701 1 0.127854 0.628112 0.625668
1097 1 0.253077 0.752892 0.997204
1697 1 0.00490755 0.627194 0.625653
919 1 0.688206 0.998079 0.940059
1621 1 0.623461 0.752578 0.505365
1070 1 0.444629 0.694518 0.999663
1548 1 0.24447 0.56186 0.562679
1575 1 0.187328 0.626252 0.565317
1579 1 0.307708 0.625088 0.568482
1670 1 0.184595 0.558381 0.624835
1674 1 0.310717 0.56037 0.623717
1705 1 0.239065 0.631163 0.624126
2020 1 0.99403 0.936 0.941365
1945 1 0.74585 0.502199 0.871973
523 1 0.315584 0.999337 0.560029
1552 1 0.368741 0.563948 0.565595
1583 1 0.429632 0.621399 0.568151
1678 1 0.432756 0.560468 0.626714
1709 1 0.3691 0.621066 0.630578
1713 1 0.50047 0.62667 0.623607
2045 1 0.867 0.870793 0.877748
1590 1 0.691108 0.694821 0.500123
1556 1 0.494135 0.569019 0.564976
1560 1 0.624137 0.561837 0.558855
1587 1 0.557623 0.629611 0.564541
1682 1 0.557692 0.56631 0.619659
1717 1 0.624341 0.620858 0.620043
2046 1 0.932415 0.933745 0.873465
2047 1 0.934605 0.874675 0.937158
1589 1 0.62554 0.632864 0.506899
1564 1 0.75755 0.559372 0.561685
1591 1 0.691653 0.618752 0.565275
1595 1 0.819324 0.623736 0.563816
1686 1 0.688404 0.56283 0.621967
1690 1 0.813324 0.558183 0.626073
1721 1 0.750841 0.626286 0.620361
1601 1 0.00172403 0.750499 0.499046
1540 1 0.000722266 0.554993 0.563994
1568 1 0.875517 0.557069 0.568701
1599 1 0.940518 0.622822 0.561395
1694 1 0.936984 0.559707 0.634115
1725 1 0.880877 0.628439 0.622165
1683 1 0.556463 0.501519 0.682369
1811 1 0.566792 0.502313 0.810648
1576 1 0.131667 0.691122 0.56567
1603 1 0.0613871 0.747266 0.562202
1608 1 0.125996 0.807424 0.558238
1698 1 0.0608499 0.686884 0.621888
1730 1 0.0589987 0.811183 0.624696
1733 1 0.12308 0.751304 0.629138
1572 1 0.99774 0.682853 0.556786
1604 1 0.996721 0.810911 0.561764
1729 1 0.997729 0.745902 0.623423
1679 1 0.435101 0.505766 0.686896
923 1 0.801792 0.998721 0.936224
1150 1 0.933957 0.937455 0.997719
1634 1 0.0537422 0.945784 0.502936
1580 1 0.24407 0.692492 0.562643
1607 1 0.189582 0.749378 0.558082
1611 1 0.310537 0.745672 0.559347
1612 1 0.246679 0.817947 0.565827
1702 1 0.188418 0.695886 0.630907
1706 1 0.308922 0.683683 0.627566
1734 1 0.184836 0.82127 0.624119
1737 1 0.253557 0.756972 0.621757
1738 1 0.312833 0.822717 0.622222
1046 1 0.68447 0.561425 0.999838
29 1 0.868175 0.99516 0.992745
1815 1 0.684151 0.504092 0.803439
1584 1 0.371888 0.680321 0.5652
1615 1 0.441528 0.751936 0.557503
1616 1 0.369181 0.814892 0.566168
1710 1 0.43224 0.684269 0.620441
1741 1 0.374344 0.754075 0.622602
1742 1 0.436848 0.810897 0.62127
1588 1 0.494943 0.686284 0.566766
1685 1 0.624126 0.506259 0.620458
1813 1 0.625792 0.499504 0.744378
1559 1 0.692038 0.502265 0.566444
1620 1 0.498939 0.814049 0.562296
1745 1 0.488571 0.746559 0.626697
1592 1 0.62579 0.687929 0.570818
1619 1 0.559095 0.756705 0.566351
1624 1 0.626482 0.812867 0.573114
1714 1 0.556372 0.691138 0.624527
1746 1 0.560169 0.815904 0.63174
1749 1 0.630093 0.74604 0.627723
917 1 0.62924 0.999529 0.874365
1596 1 0.761417 0.688243 0.559034
1623 1 0.684024 0.751397 0.560435
1627 1 0.815202 0.757449 0.558819
1628 1 0.748117 0.814209 0.562595
1718 1 0.687196 0.684793 0.626121
1722 1 0.813997 0.684357 0.623361
1750 1 0.689781 0.815699 0.620148
1753 1 0.752455 0.747429 0.619418
1754 1 0.810662 0.813916 0.622864
1538 1 0.0610477 0.561466 0.499916
1555 1 0.560872 0.500229 0.556808
1625 1 0.752385 0.753391 0.499933
1558 1 0.695937 0.564631 0.500703
1566 1 0.937392 0.5607 0.505065
1600 1 0.878934 0.691235 0.561667
1631 1 0.933165 0.752041 0.558813
1632 1 0.877517 0.820327 0.556203
1726 1 0.938103 0.688503 0.619467
1757 1 0.873405 0.749953 0.622386
1758 1 0.937035 0.811175 0.622534
1610 1 0.312949 0.811379 0.503464
1574 1 0.192229 0.689648 0.499839
1635 1 0.0517647 0.869262 0.564325
1640 1 0.124352 0.939262 0.562378
1762 1 0.0509045 0.937995 0.621091
1765 1 0.117455 0.877356 0.622376
1636 1 0.992001 0.94124 0.56012
1761 1 0.00341362 0.874744 0.633246
1081 1 0.756013 0.62089 0.999353
1795 1 0.057117 0.503335 0.814062
2048 1 0.865897 0.936585 0.9311
1639 1 0.182167 0.871481 0.558276
1643 1 0.308934 0.880477 0.557645
1644 1 0.250568 0.944543 0.561857
1766 1 0.186765 0.936926 0.612769
1769 1 0.249999 0.879698 0.620653
1770 1 0.319614 0.940953 0.621326
2017 1 0.997194 0.870034 0.8795
1687 1 0.681128 0.501034 0.680127
1573 1 0.125245 0.631935 0.502133
1617 1 0.50176 0.748401 0.501561
1803 1 0.315174 0.503976 0.810351
1817 1 0.745486 0.502358 0.742646
1647 1 0.437255 0.876021 0.557899
1648 1 0.376101 0.937152 0.569028
1773 1 0.375738 0.874185 0.625146
1774 1 0.440741 0.941553 0.628932
1652 1 0.500894 0.940238 0.564276
1586 1 0.559014 0.690863 0.507302
1141 1 0.625226 0.871313 0.999647
1777 1 0.492475 0.875431 0.61859
1651 1 0.567816 0.871184 0.562713
1656 1 0.622409 0.936312 0.554212
1778 1 0.564089 0.933447 0.623003
1781 1 0.622499 0.87161 0.631297
9 1 0.245777 0.993066 1.00078
1677 1 0.375056 0.505918 0.621897
1613 1 0.376615 0.755203 0.506501
1146 1 0.811662 0.935515 0.998815
1655 1 0.686197 0.881138 0.558579
1659 1 0.815764 0.869437 0.559112
1660 1 0.750705 0.941313 0.554214
1782 1 0.688886 0.935396 0.625448
1785 1 0.747114 0.878413 0.621227
1786 1 0.813964 0.933351 0.618444
1642 1 0.316571 0.943048 0.499523
1821 1 0.87422 0.501615 0.74611
899 1 0.05604 0.995273 0.938507
539 1 0.813705 0.996521 0.554862
1138 1 0.563634 0.933354 0.999299
1935 1 0.438789 0.503146 0.938674
1663 1 0.945781 0.879027 0.560073
1664 1 0.877684 0.937208 0.560735
1789 1 0.878308 0.876363 0.625361
1790 1 0.939738 0.932692 0.625443
1665 1 0.00654169 0.500831 0.631068
663 1 0.689319 0.996617 0.68233
517 1 0.124477 0.997269 0.504262
1582 1 0.430077 0.682064 0.500926
1672 1 0.126232 0.559152 0.688621
1699 1 0.0656453 0.62742 0.695457
1794 1 0.0648279 0.563692 0.751647
1800 1 0.12774 0.559793 0.814948
1827 1 0.0628956 0.617968 0.814378
1829 1 0.126947 0.620741 0.750258
1668 1 0.0024524 0.563949 0.688961
2039 1 0.690958 0.879279 0.934953
1939 1 0.570399 0.50819 0.935411
2042 1 0.811891 0.939217 0.872184
2044 1 0.749464 0.932126 0.93189
1676 1 0.245746 0.567739 0.684116
1703 1 0.188303 0.624326 0.688415
1707 1 0.310948 0.621674 0.700434
1798 1 0.18817 0.559447 0.752731
1802 1 0.317555 0.556423 0.750712
1804 1 0.252733 0.566422 0.815376
1831 1 0.186962 0.622215 0.815704
1833 1 0.243093 0.623452 0.751823
1835 1 0.316322 0.620492 0.811702
669 1 0.86959 0.992183 0.625417
1680 1 0.367995 0.559208 0.68768
1711 1 0.434175 0.624791 0.681992
1806 1 0.435874 0.560396 0.750054
1808 1 0.377941 0.561623 0.811322
1837 1 0.376224 0.62146 0.747995
1839 1 0.440372 0.620084 0.809972
1812 1 0.496989 0.559234 0.807895
2041 1 0.751589 0.877035 0.87184
1841 1 0.505636 0.625823 0.749304
1684 1 0.500621 0.565417 0.679462
1688 1 0.618078 0.564043 0.683178
1715 1 0.565248 0.63297 0.684644
1810 1 0.558434 0.560036 0.748314
1816 1 0.625971 0.570564 0.814035
1843 1 0.561167 0.626715 0.811789
1845 1 0.615718 0.626057 0.750997
1577 1 0.256186 0.624007 0.499651
1692 1 0.750873 0.555555 0.680812
1719 1 0.688745 0.625409 0.684943
1723 1 0.813946 0.622192 0.686121
1814 1 0.682916 0.562463 0.744082
1818 1 0.816024 0.560267 0.749236
1820 1 0.74797 0.561966 0.810654
1847 1 0.690269 0.629784 0.810254
1849 1 0.74705 0.620577 0.742257
1851 1 0.816925 0.620216 0.809083
1825 1 0.995902 0.622277 0.749795
1796 1 0.996398 0.565786 0.807788
1696 1 0.878971 0.566899 0.685565
1727 1 0.93726 0.630074 0.687179
1822 1 0.935627 0.562676 0.750749
1824 1 0.881481 0.556768 0.811289
1853 1 0.874871 0.629079 0.746581
1855 1 0.938404 0.630936 0.811058
2043 1 0.818532 0.867883 0.941957
1704 1 0.123798 0.687804 0.690297
1731 1 0.0609619 0.750012 0.683242
1736 1 0.1261 0.815455 0.684981
1826 1 0.0613667 0.685671 0.753699
1832 1 0.122122 0.684451 0.812293
1858 1 0.0619008 0.808229 0.743276
1859 1 0.0635972 0.748192 0.814813
1861 1 0.126433 0.751467 0.747752
1864 1 0.122683 0.809107 0.810312
1732 1 0.998923 0.806374 0.684111
1708 1 0.247217 0.689855 0.689115
1735 1 0.186537 0.75094 0.68708
1739 1 0.314865 0.750001 0.681127
1740 1 0.250226 0.815255 0.682643
1830 1 0.191032 0.682909 0.746532
1834 1 0.310017 0.694752 0.746022
1836 1 0.250736 0.685462 0.817652
1862 1 0.191343 0.813856 0.745415
1863 1 0.18265 0.74999 0.812123
1865 1 0.251875 0.751134 0.750369
1866 1 0.313376 0.820285 0.748175
1867 1 0.311396 0.751583 0.815039
1868 1 0.253991 0.817501 0.811024
1712 1 0.374505 0.682404 0.690056
1743 1 0.428304 0.746895 0.688314
1744 1 0.378356 0.814092 0.678763
1838 1 0.440327 0.682578 0.755033
1840 1 0.370093 0.690252 0.807488
1869 1 0.370503 0.755473 0.746676
1870 1 0.437786 0.812174 0.751268
1871 1 0.433892 0.750289 0.812231
1872 1 0.37693 0.814227 0.811698
1844 1 0.501817 0.688099 0.810382
1873 1 0.498436 0.749047 0.747012
1748 1 0.49304 0.809798 0.688206
1876 1 0.501148 0.809405 0.813213
1716 1 0.495133 0.690447 0.687773
1720 1 0.628061 0.689071 0.687604
1747 1 0.559158 0.750818 0.684265
1752 1 0.623858 0.813375 0.687816
1842 1 0.56324 0.692102 0.749819
1848 1 0.622635 0.68912 0.8129
1874 1 0.553444 0.81235 0.744324
1875 1 0.559364 0.753408 0.811584
1877 1 0.631732 0.743604 0.750218
1880 1 0.625233 0.810115 0.810003
1724 1 0.752282 0.688839 0.683048
1751 1 0.694952 0.746949 0.684023
1755 1 0.810231 0.747851 0.680762
1756 1 0.749506 0.810293 0.682023
1846 1 0.692136 0.688241 0.742857
1850 1 0.808247 0.687582 0.744326
1852 1 0.756547 0.684454 0.809712
1878 1 0.688777 0.812117 0.753986
1879 1 0.687605 0.752541 0.806002
1881 1 0.752158 0.756842 0.745011
1882 1 0.817521 0.817399 0.748751
1883 1 0.806897 0.748833 0.808873
1884 1 0.751289 0.81749 0.810223
1857 1 0.00243808 0.744231 0.759895
1860 1 0.00504279 0.810487 0.816262
1828 1 0.00352743 0.680642 0.81405
1700 1 0.00149213 0.686069 0.695211
1728 1 0.876484 0.689949 0.683775
1759 1 0.942127 0.746636 0.682379
1760 1 0.876192 0.804904 0.681972
1854 1 0.936978 0.68974 0.750018
1856 1 0.875063 0.6916 0.806923
1885 1 0.871718 0.756228 0.747488
1886 1 0.937203 0.807333 0.756817
1887 1 0.938151 0.746046 0.812679
1888 1 0.872727 0.815034 0.809875
1763 1 0.0633578 0.874755 0.688667
1768 1 0.122442 0.937185 0.677479
1890 1 0.0529745 0.942358 0.753182
1891 1 0.0662668 0.875517 0.804554
1893 1 0.127315 0.873423 0.748484
1896 1 0.120959 0.935743 0.810861
1764 1 0.996392 0.942539 0.690456
2038 1 0.685726 0.940388 0.87663
1554 1 0.560074 0.566052 0.50157
531 1 0.566376 0.996685 0.565048
1767 1 0.189322 0.881112 0.680811
1771 1 0.314649 0.880582 0.684555
1772 1 0.248816 0.941919 0.681736
1894 1 0.183926 0.943145 0.747059
1895 1 0.191082 0.8788 0.808563
1897 1 0.249275 0.883945 0.749486
1898 1 0.31219 0.936769 0.747682
1899 1 0.312219 0.880336 0.81213
1900 1 0.251289 0.944297 0.810475
1069 1 0.374609 0.630874 1.00043
1775 1 0.441736 0.87723 0.689523
1776 1 0.376298 0.936178 0.685281
1901 1 0.37884 0.877834 0.749207
1902 1 0.439091 0.941546 0.759441
1903 1 0.43969 0.874916 0.810216
1904 1 0.375225 0.93613 0.816928
1780 1 0.500024 0.948199 0.688335
1905 1 0.498909 0.871698 0.752679
1908 1 0.504207 0.944052 0.814781
1593 1 0.752608 0.626256 0.50208
1779 1 0.559326 0.87798 0.685956
1784 1 0.623705 0.938851 0.687256
1906 1 0.559834 0.942939 0.748266
1907 1 0.565773 0.875908 0.809691
1909 1 0.619577 0.875016 0.745893
1912 1 0.622648 0.940721 0.809969
521 1 0.245447 0.995518 0.500735
1551 1 0.433836 0.501495 0.56099
913 1 0.507028 0.998387 0.877552
1783 1 0.687417 0.871497 0.689687
1787 1 0.817212 0.876355 0.686562
1788 1 0.753732 0.933858 0.685052
1910 1 0.682546 0.934074 0.750951
1911 1 0.688133 0.875504 0.812412
1913 1 0.747032 0.871463 0.745257
1914 1 0.809455 0.936956 0.748125
1915 1 0.813396 0.878696 0.808869
1916 1 0.747765 0.934705 0.810796
897 1 0.998819 0.997928 0.871155
1943 1 0.689948 0.500866 0.940304
2035 1 0.569252 0.873058 0.935728
2037 1 0.633378 0.872004 0.87571
1892 1 0.995679 0.939778 0.813289
1791 1 0.93146 0.872533 0.691802
1792 1 0.875649 0.935621 0.695297
1917 1 0.872819 0.876425 0.755309
1918 1 0.935806 0.933969 0.752139
1919 1 0.933754 0.87053 0.811789
1920 1 0.878842 0.936152 0.815717
1889 1 0.994214 0.871271 0.751339
1038 1 0.436424 0.566012 0.997447
1922 1 0.0625897 0.562385 0.87883
1928 1 0.131962 0.560165 0.937083
1955 1 0.057306 0.629662 0.936041
1957 1 0.121751 0.627008 0.878323
775 1 0.185579 0.998883 0.817729
915 1 0.563148 0.997036 0.937537
2036 1 0.505644 0.937468 0.935062
2018 1 0.0648953 0.931954 0.873709
795 1 0.816957 0.997705 0.807893
1930 1 0.318131 0.556345 0.875302
1926 1 0.187237 0.561856 0.872312
1963 1 0.320676 0.624223 0.938388
1961 1 0.246483 0.624383 0.880101
1932 1 0.253544 0.554596 0.936122
1959 1 0.188839 0.627336 0.938014
901 1 0.125997 0.993649 0.875427
2032 1 0.378912 0.934264 0.940345
2030 1 0.442418 0.937331 0.871432
1965 1 0.37541 0.619737 0.868764
1934 1 0.440636 0.557796 0.873177
1967 1 0.438179 0.623279 0.930113
1936 1 0.378923 0.560042 0.93254
1969 1 0.503838 0.622657 0.875253
1940 1 0.499247 0.555233 0.940084
1938 1 0.559907 0.560646 0.874496
1944 1 0.628831 0.564969 0.935649
1971 1 0.563061 0.6185 0.940231
1973 1 0.621679 0.622513 0.880693
1126 1 0.18467 0.93372 0.995251
1793 1 0.00451223 0.510097 0.747652
1979 1 0.813932 0.624021 0.930366
1948 1 0.759091 0.564742 0.937822
1975 1 0.693708 0.632065 0.938665
1977 1 0.757771 0.622464 0.867381
1946 1 0.817239 0.558744 0.87135
1942 1 0.689778 0.570683 0.880198
1941 1 0.638641 0.507761 0.873098
2040 1 0.624302 0.936107 0.937862
1953 1 0.998683 0.618581 0.872076
1561 1 0.762398 0.507352 0.500804
1924 1 0.998472 0.559982 0.93256
1983 1 0.938176 0.629138 0.94469
1950 1 0.934326 0.559019 0.872064
1952 1 0.878909 0.555801 0.942921
1981 1 0.877076 0.626392 0.869737
1629 1 0.874602 0.753455 0.500378
2021 1 0.128043 0.875836 0.87058
911 1 0.447237 0.996058 0.929952
1960 1 0.131272 0.686962 0.93872
1989 1 0.132162 0.752955 0.877861
1954 1 0.0638242 0.68148 0.875525
1986 1 0.0681272 0.819841 0.875566
1987 1 0.0649463 0.751964 0.943233
1992 1 0.124698 0.813005 0.940321
2028 1 0.255683 0.934443 0.938845
1669 1 0.125598 0.502374 0.628706
1996 1 0.253699 0.818259 0.935181
1964 1 0.253683 0.686558 0.938093
1991 1 0.196615 0.756576 0.936027
1993 1 0.254538 0.745529 0.871885
1958 1 0.188526 0.682316 0.87525
1962 1 0.318287 0.684456 0.877166
1995 1 0.3156 0.746644 0.93857
1990 1 0.195748 0.813646 0.872343
1994 1 0.310872 0.811959 0.874561
2033 1 0.503398 0.875884 0.870806
1997 1 0.371507 0.748966 0.874379
1968 1 0.37627 0.692658 0.935948
1998 1 0.439364 0.815455 0.868613
2000 1 0.382456 0.816909 0.931255
1999 1 0.442213 0.755095 0.932543
1966 1 0.436028 0.687322 0.866773
1972 1 0.503271 0.684892 0.936571
2004 1 0.502414 0.816851 0.934276
2025 1 0.254502 0.880207 0.873291
2001 1 0.502164 0.753836 0.877329
1970 1 0.558853 0.685218 0.877766
2005 1 0.622814 0.753106 0.876209
2008 1 0.630702 0.81506 0.936547
1976 1 0.625421 0.688129 0.93233
2003 1 0.561543 0.749063 0.939325
2002 1 0.565064 0.81811 0.875244
2026 1 0.309907 0.936978 0.879055
2019 1 0.0603629 0.883209 0.935105
2023 1 0.188441 0.871162 0.93441
2024 1 0.127338 0.934503 0.932911
1547 1 0.309211 0.502888 0.563161
1562 1 0.817719 0.567786 0.501274
2022 1 0.19373 0.93561 0.872497
1980 1 0.754473 0.689383 0.933617
2011 1 0.817143 0.751837 0.93787
2006 1 0.694172 0.80731 0.870154
2012 1 0.746435 0.81752 0.938095
2007 1 0.685556 0.747369 0.931993
2009 1 0.751342 0.751709 0.87361
1974 1 0.697003 0.693095 0.869537
2010 1 0.810119 0.810799 0.871552
1978 1 0.814997 0.694255 0.870636
2027 1 0.316624 0.878383 0.933512
793 1 0.748635 0.991479 0.743535
1985 1 0.00922741 0.75628 0.879612
1956 1 0.00132224 0.692286 0.932317
1988 1 0.994056 0.811709 0.933845
2013 1 0.877158 0.755638 0.878604
1984 1 0.870943 0.6905 0.928108
2014 1 0.936615 0.8145 0.868939
1982 1 0.940319 0.68952 0.878084
2015 1 0.935626 0.748254 0.944654
2016 1 0.874622 0.812162 0.946292
1567 1 0.940161 0.500401 0.564677
2029 1 0.374791 0.878521 0.869582
1693 1 0.879353 0.501159 0.627934
2034 1 0.565937 0.934076 0.873383
2031 1 0.438402 0.881671 0.944235
661 1 0.634659 0.997135 0.619128
5 1 0.120821 0.994673 0.998544
1066 1 0.314882 0.689857 1.00035
1034 1 0.312886 0.562559 0.998672
777 1 0.251279 0.996335 0.745413
1098 1 0.316667 0.814992 0.997991
1110 1 0.690829 0.813345 1.00034
1657 1 0.752598 0.868789 0.503843
771 1 0.0614815 0.993122 0.814064
1121 1 0.998841 0.864991 0.998327
909 1 0.379958 0.995081 0.877183
1129 1 0.254922 0.869467 0.999363
1549 1 0.3703 0.500842 0.510279
1065 1 0.2532 0.622345 0.994679
1597 1 0.880691 0.626273 0.503087
1050 1 0.813473 0.552744 0.997693
1042 1 0.565694 0.5604 0.996545
1026 1 0.0716435 0.561765 0.994391
1585 1 0.498438 0.630557 0.500841
1041 1 0.500305 0.500736 0.99807
1077 1 0.629516 0.626316 0.993366
1062 1 0.191551 0.690852 0.998174
1614 1 0.438444 0.81522 0.502108
1622 1 0.681808 0.809492 0.504276
1101 1 0.384581 0.752237 0.996699
529 1 0.504985 0.999546 0.501491
1637 1 0.117401 0.87046 0.505007
1542 1 0.191658 0.564429 0.505268
1645 1 0.37342 0.880029 0.502642
21 1 0.620527 0.994728 0.998695
1137 1 0.500166 0.879037 0.998807
1037 1 0.376943 0.500678 0.996001
1106 1 0.566612 0.813505 0.996437
1057 1 0.00472319 0.615704 0.997478
1085 1 0.87314 0.61592 0.99765
1638 1 0.188187 0.935877 0.504942
| [
"[email protected]"
] | |
a2f9ef70d3a18cdda815fb0235790cb046d2e584 | 4a0e3ffff54be178b377a4c18fe0ced2d44b7be6 | /tests/test_forbidden_ops.py | 455eb958543a82e631cf21bd2396fe317e29d2a1 | [] | no_license | WinVector/data_algebra | 608371904c0fcc99ffab7e0fe57c49dc75fd6b21 | 1e96817919ae891ba108d8d7471b2200b2528271 | refs/heads/main | 2023-04-13T20:11:18.682084 | 2023-04-10T14:09:41 | 2023-04-10T14:09:41 | 203,080,133 | 113 | 5 | null | null | null | null | UTF-8 | Python | false | false | 1,729 | py |
import pytest
import data_algebra
from data_algebra.data_ops import *
import lark.exceptions
def test_forbidden_ops_raises():
with pytest.raises(lark.exceptions.UnexpectedToken):
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "x && y"}
)
with pytest.raises(lark.exceptions.UnexpectedToken):
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "x || y"}
)
with pytest.raises(lark.exceptions.UnexpectedCharacters): # not in grammar
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "! y"}
)
with pytest.raises(AttributeError): # objects don't implement ~
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "~ y"}
)
with pytest.raises(lark.exceptions.UnexpectedToken):
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "x = y"}
)
def test_forbidden_ops_inlines_left_alone():
assert 'x ** y' in str(TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "x ** y"}
))
def test_forbidden_ops_inline():
with pytest.raises(ValueError):
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "x & y"}
)
with pytest.raises(ValueError):
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "x | y"}
)
with pytest.raises(ValueError):
TableDescription(table_name="d", column_names=["x", "y"]).extend(
{"z": "x ^ y"}
)
| [
"[email protected]"
] | |
ae917e55d2a596415e27aed0be505c99621cfeff | 1fe4f9eb9b1d756ad17e1ff6585e8ee7af23903c | /saleor/dashboard/brand/filters.py | 61af9c9da3dd49b1b7a5fa75b7bb664b1a2508f7 | [
"BSD-3-Clause"
] | permissive | Chaoslecion123/Diver | ab762e7e6c8d235fdb89f6c958488cd9b7667fdf | 8c5c493701422eada49cbf95b0b0add08f1ea561 | refs/heads/master | 2022-02-23T10:43:03.946299 | 2019-10-19T23:39:47 | 2019-10-19T23:39:47 | 216,283,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | from django import forms
from django.utils.translation import npgettext, pgettext_lazy
from django_filters import CharFilter, ChoiceFilter, OrderingFilter
from ...core.filters import SortedFilterSet
from ...brand.models import Brand
SORT_BY_FIELDS = {
'name': pgettext_lazy('Brand list sorting option', 'name')}
BOOLEAN_CHOICES = (
('1', pgettext_lazy('Is active filter choice', 'Yes')),
('0', pgettext_lazy('Is active filter choice', 'No')))
class BrandFilter(SortedFilterSet):
name = CharFilter(
label=pgettext_lazy('Brand list name filter label', 'Name'),
lookup_expr='icontains')
is_featured = ChoiceFilter(
label=pgettext_lazy('Brand list filter label', 'Is featured'),
choices=BOOLEAN_CHOICES,
empty_label=pgettext_lazy('Filter empty choice label', 'All'),
widget=forms.Select)
sort_by = OrderingFilter(
label=pgettext_lazy('Brand list sorting filter label', 'Sort by'),
fields=SORT_BY_FIELDS.keys(),
field_labels=SORT_BY_FIELDS)
class Meta:
model = Brand
fields = []
def get_summary_message(self):
counter = self.qs.count()
return npgettext(
'Number of matching records in the dashboard brands list',
'Found %(counter)d matching brand',
'Found %(counter)d matching brands',
number=counter) % {'counter': counter}
| [
"[email protected]"
] | |
96bdcfd571407017f3cf00cc586fc0bf1d529645 | e008427620158afc3fd92181d2e8d71a31174b22 | /conductor/blueprints/user/controllers.py | 4bcc107f244f83e7b423331e9b94c43df705a6b0 | [
"MIT"
] | permissive | openbudgets/os-conductor | 8ff336f4fd235c77d23850536701ca4ae2f9a716 | 6138f17b643a218f4782c76f5b2febd7171b44f3 | refs/heads/master | 2020-03-08T21:58:30.799707 | 2018-05-09T08:21:39 | 2018-05-09T08:21:39 | 128,419,191 | 1 | 0 | null | 2018-04-06T16:29:19 | 2018-04-06T16:29:19 | null | UTF-8 | Python | false | false | 9,493 | py | import os
import datetime
import logging
import json
import base64
import zlib
try:
import urllib.parse as urlparse
except ImportError:
import urlparse # silence pyflakes
import jwt
import requests
from flask_oauthlib.client import OAuth, OAuthException
from .models import get_permission, get_user, create_or_get_user, save_user
def readfile_or_default(filename, default):
try:
return open(filename).read().strip()
except IOError:
return default
try:
credentials = ''.join(os.environ.get('OS_CONDUCTOR_SECRETS_%d' % i)
for i in range(4)).encode('ascii')
credentials = base64.decodebytes(credentials)
credentials = zlib.decompress(credentials).decode('ascii')
credentials = json.loads(credentials)
except Exception as _:
credentials = {}
PUBLIC_KEY = credentials.get('public.pem', '''-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzSrV/SxRNKufc6f0GQIu
YMASgBCOiJW5fvCnGtVMIrWvBQoCFAp9QwRHrbQrQJiPg6YqqnTvGhWssL5LMMvR
8jXXOpFUKzYaSgYaQt1LNMCwtqMB0FGSDjBrbmEmnDSo6g0Naxhi+SJX3BMcce1W
TgKRybv3N3F+gJ9d8wPkyx9xhd3H4200lHk4T5XK5+LyAPSnP7FNUYTdJRRxKFWg
ZFuII+Ex6mtUKU9LZsg9xeAC6033dmSYe5yWfdrFehmQvPBUVH4HLtL1fXTNyXuz
ZwtO1v61Qc1u/j7gMsrHXW+4csjS3lDwiiPIg6q1hTA7QJdB1M+rja2MG+owL0U9
owIDAQAB
-----END PUBLIC KEY-----''')
PRIVATE_KEY = credentials.get('private.pem', '''-----BEGIN RSA PRIVATE KEY-----
MIIEpAIBAAKCAQEAzSrV/SxRNKufc6f0GQIuYMASgBCOiJW5fvCnGtVMIrWvBQoC
FAp9QwRHrbQrQJiPg6YqqnTvGhWssL5LMMvR8jXXOpFUKzYaSgYaQt1LNMCwtqMB
0FGSDjBrbmEmnDSo6g0Naxhi+SJX3BMcce1WTgKRybv3N3F+gJ9d8wPkyx9xhd3H
4200lHk4T5XK5+LyAPSnP7FNUYTdJRRxKFWgZFuII+Ex6mtUKU9LZsg9xeAC6033
dmSYe5yWfdrFehmQvPBUVH4HLtL1fXTNyXuzZwtO1v61Qc1u/j7gMsrHXW+4csjS
3lDwiiPIg6q1hTA7QJdB1M+rja2MG+owL0U9owIDAQABAoIBAHgA7ytniZQSMnDW
szsRgIkMr4WCqawQT3CFWGikjCTdOiLraK3KONxDG53pfUcKNR9eySPsw5HxTZIP
rDE9dm6CuYJDUQT5X0Ue7qtffsa7UmFxVPVBUPnFroDgiFHjp01HFysmF3X7dYJ/
Fys4FDwK2rUxoXcnhkO7c5taErAPhpmv+QncVBkouQ3bB78av6cHdQfo+7PcvYRP
x6iDPAjMpz1wF1Fkd9mSHadjuqlC3FubbwEK5nTuSl4nPULK7KaCv9NjxyzTUi23
DWk9QCv+peIK/1h75cbB9eVvZayHlFlVNtD7Mrx5rediWABSqvNLRv/aZ0/o5+FM
1cxiYPECgYEA9AEr60CPlW9vBOacCImnWHWEH/UEwi4aNTBxpZEWRuN0HnmB+4Rt
1b+7LoX6olVBN1y8YIwzkDOCVblFaT+THBNiE7ABwB87c0jYd2ULQszqrebjXPoz
8q7MqghD+4iDfvP2QmivpadfeGGzYFI49b7W5c/Iv4w0oWgutib+hDsCgYEA10Dk
hMwg61q6YVAeTIqnV7zujfzTIif9AkePAfNLolLdn0Bx5LS6oPxeRUxyy4mImwrf
p6yZGOX/7ocy7rQ3X/F6fuxwuGa74PNZPwlLuD7UUPr//OPuQihoDKvL+52XWA5U
Q09sXK+KlvuH4DJ5UsHC9kgATyuGNUOeXYBHHbkCgYEA78Zq8x2ZOz6quQUolZc3
dEzezkyHJY4KQPRe6VUesAB5riy3F4M2L5LejMQp2/WtRYsCrll3nh+P109dryRD
GpbNjQ0rWzEVyZ7u4LzRiQ43GzbFfCt+et9czUWcEIRAu7Ne7jlTSZSk03Ymv+Ns
h8jGAkTiP6C2Y1oudN7ywtsCgYBAWIa3Z+oDUQjcJD4adWxW3wSU71oSINASSV/n
nloiuRDFFVe2nYwYqbhokNTUIVXzuwlmr0LI3aBnJoVENB1FkgMjQ/ziMtvBAB3S
qS24cxe26YFykJRdtIR+HTEKE271hLsNsAVdo6ATSDey/oOkCIYGZzmocQNaks8Z
dkpMCQKBgQCfZ75r1l/Hzphb78Ygf9tOz1YUFqw/xY9jfufW4C/5SgV2q2t/AZok
LixyPP8SzJcH20iKdc9kS7weiQA0ldT2SYv6VT7IqgQ3i/qYdOmaggjBGaIuIB/B
QZOJBnaSMVJFf/ZO1/1ilGVGfZZ3TMOA1TJlcTZisk56tRTbkivL9Q==
-----END RSA PRIVATE KEY-----''')
GOOGLE_KEY = credentials.get('google.key', 'google consumer key')
GOOGLE_SECRET = credentials.get('google.secret.key',
'google consumer secret')
LIBJS = readfile_or_default(os.path.join(os.path.dirname(__file__),
'lib',
'lib.js'),
'alert("error");')
oauth = OAuth()
def _google_remote_app():
if 'google' not in oauth.remote_apps:
oauth.remote_app(
'google',
base_url='https://www.googleapis.com/oauth2/v1/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={
'scope': 'email profile',
},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
consumer_key=GOOGLE_KEY,
consumer_secret=GOOGLE_SECRET)
return oauth.google
def _get_user_profile(access_token):
if access_token is None:
return None
headers = {'Authorization': 'OAuth {}'.format(access_token)}
response = requests.get('https://www.googleapis.com/oauth2/v1/userinfo',
headers=headers)
if response.status_code == 401:
return None
return response.json()
def authenticate(token, next, callback_url):
"""Check if user is authenticated
"""
if token is not None:
try:
token = jwt.decode(token, PRIVATE_KEY)
except jwt.InvalidTokenError:
token = None
if token is not None:
userid = token['userid']
user = get_user(userid)
if user is not None:
ret = {
'authenticated': True,
'profile': user
}
return ret
# Otherwise - not authenticated
provider = 'google'
state = {
'next': next,
'provider': provider,
'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=10),
'nbf': datetime.datetime.utcnow()
}
state = jwt.encode(state, PRIVATE_KEY)
google_login_url = _google_remote_app() \
.authorize(callback=callback_url, state=state).headers['Location']
ret = {
'authenticated': False,
'providers': {
'google': {
'url': google_login_url
}
}
}
return ret
def _update_next_url(next_url, client_token):
if client_token is None:
return next_url
url_parts = list(urlparse.urlparse(next_url))
query = dict(urlparse.parse_qsl(url_parts[4]))
query.update({'jwt': client_token})
url_parts[4] = urlparse.urlencode(query)
next_url = urlparse.urlunparse(url_parts)
return next_url
def _get_token_from_profile(provider, profile):
if profile is None:
return None
provider_id = profile['id']
name = profile['name']
email = profile['email']
avatar_url = profile['picture']
userid = '%s:%s' % (provider, provider_id)
user = create_or_get_user(userid, name, email, avatar_url)
token = {
'userid': user['idhash'],
'exp': (datetime.datetime.utcnow() +
datetime.timedelta(days=14))
}
client_token = jwt.encode(token, PRIVATE_KEY)
return client_token
def oauth_callback(state, callback_url,
set_session=lambda k, v: None):
"""Callback from google
"""
try:
app = _google_remote_app()
set_session('%s_oauthredir' % app.name, callback_url)
resp = app.authorized_response()
except OAuthException as e:
resp = e
if isinstance(resp, OAuthException):
logging.error("OAuthException: %r", resp.data, exc_info=resp)
resp = None
try:
state = jwt.decode(state, PRIVATE_KEY)
except jwt.InvalidTokenError:
state = {}
next_url = '/'
provider = state.get('provider')
next_url = state.get('next', next_url)
if resp is not None and provider is not None:
access_token = resp.get('access_token')
profile = _get_user_profile(access_token)
client_token = _get_token_from_profile(provider, profile)
# Add client token to redirect url
next_url = _update_next_url(next_url, client_token)
return next_url
def update(token, username):
"""Update a user
"""
err = None
if token is not None:
try:
token = jwt.decode(token, PRIVATE_KEY)
except jwt.InvalidTokenError:
token = None
err = 'Not authenticated'
else:
err = 'No token'
if token is not None:
userid = token['userid']
user = get_user(userid)
if user is not None:
dirty = False
if username is not None:
if user.get('username') is None:
user['username'] = username
dirty = True
else:
err = 'Cannot modify username, already set'
if dirty:
save_user(user)
else:
err = 'Unknown User'
ret = {'success': err is None}
if err is not None:
ret['error'] = err
return ret
def authorize(token, service):
"""Return user authorization for a service
"""
if token is not None and service is not None:
try:
token = jwt.decode(token, PRIVATE_KEY)
except jwt.InvalidTokenError:
token = None
if token is not None:
userid = token['userid']
service_permissions = get_permission('*', service)
user_permissions = get_permission(userid, service)
permissions = {}
if service_permissions is not None:
permissions.update(service_permissions)
if user_permissions is not None:
permissions.update(user_permissions)
ret = {
'userid': userid,
'permissions': permissions,
'service': service
}
token = jwt.encode(ret, PRIVATE_KEY, algorithm='RS256')\
.decode('ascii')
ret['token'] = token
return ret
ret = {
'permissions': {}
}
return ret
def public_key():
return PUBLIC_KEY
def lib():
return LIBJS
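# Usage sketch (comments only; the service name below is hypothetical and not part
# of this module): a client first calls authenticate(token, next, callback_url) to
# get either the stored profile or provider login URLs, and after OAuth completes
# it calls authorize(token, 'example-service') to obtain a dict such as
#   {'userid': ..., 'permissions': {...}, 'service': 'example-service', 'token': <JWT>}
# where 'token' is signed with PRIVATE_KEY using RS256 and can be verified by any
# consumer that holds public_key().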
| [
"[email protected]"
] | |
4de65748c9746a5924fdb0a8c063ac3daef292d1 | aa0d55b2aa22da0af6545ce0da46d04dbdc3bffc | /cpgames/core/games/pingpong/modules/sprites.py | 5f00745d148bc0bb48b7b36a20e715cd42b66d90 | [
"Apache-2.0"
] | permissive | cyanghsieh/Games | 19fdad463cf12cbd503a399ed2700c0dae615714 | 07767df6d181b9eae89ce0a8b883d19afb450cc1 | refs/heads/master | 2023-05-11T11:11:09.777569 | 2023-02-22T14:28:18 | 2023-02-22T14:28:18 | 283,113,319 | 0 | 0 | MIT | 2020-07-28T05:49:13 | 2020-07-28T05:49:12 | null | UTF-8 | Python | false | false | 3,664 | py | '''
Function:
    Some necessary sprite classes
Author:
Charles
WeChat official account:
Charles的皮卡丘
'''
import random
import pygame
from .utils import loadImage
'''Ping-pong ball'''
class Ball(pygame.sprite.Sprite):
def __init__(self, imgpath, cfg, **kwargs):
pygame.sprite.Sprite.__init__(self)
self.cfg = cfg
self.image = loadImage(imgpath)
self.rect = self.image.get_rect()
self.reset()
    '''Move'''
def move(self, ball, racket_left, racket_right, hit_sound, goal_sound):
self.rect.left = self.rect.left + self.speed * self.direction_x
self.rect.top = min(max(self.rect.top + self.speed * self.direction_y, 0), self.cfg.SCREENSIZE[1] - self.rect.height)
        # Hit a racket
if pygame.sprite.collide_rect(ball, racket_left) or pygame.sprite.collide_rect(ball, racket_right):
self.direction_x, self.direction_y = -self.direction_x, random.choice([1, -1])
self.speed += 1
scores = [0, 0]
hit_sound.play()
        # Hit the top wall
elif self.rect.top == 0:
self.direction_y = 1
self.speed += 1
scores = [0, 0]
        # Hit the bottom wall
elif self.rect.top == self.cfg.SCREENSIZE[1] - self.rect.height:
self.direction_y = -1
self.speed += 1
scores = [0, 0]
        # Hit the left wall
elif self.rect.left < 0:
self.reset()
racket_left.reset()
racket_right.reset()
scores = [0, 1]
goal_sound.play()
        # Hit the right wall
elif self.rect.right > self.cfg.SCREENSIZE[0]:
self.reset()
racket_left.reset()
racket_right.reset()
scores = [1, 0]
goal_sound.play()
        # Normal case (no collision)
else:
scores = [0, 0]
return scores
    '''Initialize'''
def reset(self):
self.rect.centerx = self.cfg.SCREENSIZE[0] // 2
self.rect.centery = random.randrange(self.rect.height // 2, self.cfg.SCREENSIZE[1] - self.rect.height // 2)
self.direction_x = random.choice([1, -1])
self.direction_y = random.choice([1, -1])
self.speed = 1
    '''Draw onto the screen'''
def draw(self, screen):
screen.blit(self.image, self.rect)
'''Ping-pong racket'''
class Racket(pygame.sprite.Sprite):
def __init__(self, imgpath, type_, cfg, **kwargs):
pygame.sprite.Sprite.__init__(self)
self.cfg = cfg
self.type_ = type_
self.image = loadImage(imgpath, False)
self.rect = self.image.get_rect()
self.reset()
    '''Move'''
def move(self, direction):
if direction == 'UP':
self.rect.top = max(0, self.rect.top - self.speed)
elif direction == 'DOWN':
self.rect.bottom = min(self.cfg.SCREENSIZE[1], self.rect.bottom + self.speed)
else:
raise ValueError('[direction] in Racket.move is %s, expect %s or %s...' % (direction, 'UP', 'DOWN'))
    '''Automatic movement for the computer player'''
def automove(self, ball):
if ball.rect.centery - 25 > self.rect.centery:
self.move('DOWN')
if ball.rect.centery + 25 < self.rect.centery:
self.move('UP')
    '''Initialize'''
def reset(self):
        # Left/right racket
self.rect.centerx = self.cfg.SCREENSIZE[0] - self.rect.width // 2 if self.type_ == 'RIGHT' else self.rect.width // 2
self.rect.centery = self.cfg.SCREENSIZE[1] // 2
        # Speed
self.speed = 5
    '''Draw onto the screen'''
def draw(self, screen):
screen.blit(self.image, self.rect) | [
"[email protected]"
] | |
3a8bab67ec8ceee72f60fd6eb8163fcbd1d325f5 | 77717d0024c8597fec83600259ea5547abbc183a | /demo/image_demo.py | 4a35a75b5f67f853a69af3ad51dc8550ac08a6b6 | [
"Apache-2.0"
] | permissive | fengyouliang/wheat_detection | 0a090ef5eda7f2c5463996f4795f9ce06dd04050 | d056123426a1260c29b486cbb8e44a88a0a3c5bc | refs/heads/master | 2022-11-17T15:09:29.113493 | 2020-07-18T13:47:34 | 2020-07-18T13:47:34 | 276,532,878 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 932 | py | from argparse import ArgumentParser
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
def main():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='bbox score threshold')
args = parser.parse_args()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
result = inference_detector(model, args.img)
# show the results
show_result_pyplot(model, args.img, result, score_thr=args.score_thr)
if __name__ == '__main__':
main()
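# Example invocation (all file paths below are hypothetical, for illustration only):
#   python demo/image_demo.py demo.jpg faster_rcnn_config.py checkpoint.pth \
#       --device cuda:0 --score-thr 0.5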
| [
"[email protected]"
] | |
cbab46f2372b7b4738133a09e2c904eba5e527ca | 2caf6885511af24443e22aaa43cd679d694f6f80 | /note/download_note/first_month/day15/my_project/skill_system/skill_deployer.py | 0ebb3c60b4ac54c50d95e2cf3c952ee6d6af1563 | [] | no_license | nandadao/Python_note | 7f9ba54a73af05c935b4f7e24cacb728859a6c69 | abddfc2e9a1704c88867cff1898c9251f59d4fb5 | refs/heads/master | 2020-11-25T18:29:50.607670 | 2019-12-19T01:28:02 | 2019-12-19T01:28:02 | 228,793,207 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py |
# The only condition for a module import to succeed:
# sys.path + the import path must correctly locate the module
import sys
sys.path.append('/home/tarena/month01/day15/my_project')
print(sys.path)
from common.list_helper import ListHelper
class SkillDeployer:
def generate_skill(self):
print("SkillDeployer -- generate_skill")
ListHelper.fun01() | [
"[email protected]"
] | |
1bae11f4339204618d4282ee3fc377f918457cdd | 1406e2274c55ade3b105204ee5493fed5dcbf0e0 | /tensorflow_transform/beam/combiner_packing_util.py | 5ceb65486ecb0815b8da0a88e830806699d9df70 | [
"Apache-2.0"
] | permissive | akamil-etsy/transform | 76553cfe3f49eff26b74ba49bb0879830ede354a | 4c89b650004ecd4f9e385cc7f828ae4726dde20f | refs/heads/master | 2023-06-08T16:50:23.109029 | 2021-07-01T22:37:28 | 2021-07-01T22:38:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,416 | py | # Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to perform combiner packing optimization.
Packing accumulate combines:
a) First, we visit the TFT graph to gather all the combine accumulate nodes that
can be packed under the same grandparent node (the parent is an
ExtractFromDict node which will now follow the accumulate node).
b) Second, we visit the graph to replace the individual combine accumulate nodes
with the packed node.
Packing merge combines:
a) First, we visit the TFT graph to gather all the combine merges that can be
packed (i.e., all the combine merges within a TFT phase). We currently do
this packing only when there is a single phase pre-processing function.
b) Since the inputs to the flatten node (which flattens the output of the
combine accumulates) before the packed merge come from different paths, we
add redundant flatten and packed merge nodes as and when we visit a new input
of this flatten node. At the end of this traversal, we would have one final
packed merge node with a corresponding flatten node having all the needed
inputs, and in addition to this we would have a set of redundant packed merge
and flatten nodes which need to be removed.
c) Finally, we remove the redundant flatten and packed merge nodes.
"""
import collections
from tensorflow_transform import analyzer_nodes
from tensorflow_transform import nodes
from tensorflow_transform.beam import beam_nodes
# TODO(https://issues.apache.org/jira/browse/SPARK-22674): Switch to
# `collections.namedtuple` or `typing.NamedTuple` once the Spark issue is
# resolved.
from tfx_bsl.types import tfx_namedtuple
# Used for debugging only. This will point to the most recent graph built.
_ANALYSIS_GRAPH = None
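# Describes one packable combine: the combiner object itself, the key(s) used to
# look up its inputs, and the label of the original combine node it stands in for.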
_CombinerOpWrapper = tfx_namedtuple.namedtuple('_CombinerOpWrapper',
['combiner', 'keys', 'label'])
class _ValidationVisitor(nodes.Visitor):
"""Visitor to determine if a node is ready to run."""
def __init__(self):
self._visited_operation_def_labels = set()
def validate_operation_def(self, operation_def):
assert operation_def.label not in self._visited_operation_def_labels
self._visited_operation_def_labels.add(operation_def.label)
def validate_value(self, value):
assert isinstance(value, nodes.ValueNode)
class _InspectAccumulateCombineVisitor(_ValidationVisitor):
"""A visitor that inspects the graph and looks for combine nodes.
As this visitor visits the TFT Beam Graph, we group together all the
packable combine nodes. Specifically, we look for the following path:
ExtractFromDict --> CacheableCombineAccumulate
The combines under the same grand parent can be packed together.
In this visitor, we group all the packable combines for each unique
grand parent node and save their reference in the `packable_combines` class
attribute.
"""
def __init__(self):
super().__init__()
# Group all packable combines. We pack all the combines that have the same
# grand parent.
# {grand_parent_label: List of packable _CombinerOpWrapper's}
self.packable_combines = collections.defaultdict(list)
def visit(self, operation_def, input_values):
self.validate_operation_def(operation_def)
self._maybe_add_packable_combine(operation_def, input_values)
return nodes.OperationNode(operation_def, input_values).outputs
def _maybe_add_packable_combine(self, operation_def, input_values):
# We cannot pack the per-key combine analyzers as the key may be different
# for each analyzer.
if not isinstance(operation_def, analyzer_nodes.CacheableCombineAccumulate):
return
assert len(input_values) == 1
# Get the ExtractFromDict parent node of the current
# CacheableCombineAccumulate node.
parent = input_values[0].parent_operation
if not isinstance(parent.operation_def, beam_nodes.ExtractFromDict):
return
assert len(parent.inputs) == 1
# Get the parent of the current ExtractFromDict node.
grand_parent = parent.inputs[0].parent_operation
assert isinstance(grand_parent.operation_def, beam_nodes.ApplySavedModel)
# This is a packable combine.
grand_parent_label = grand_parent.operation_def.label
self.packable_combines[grand_parent_label].append(_CombinerOpWrapper(
combiner=operation_def.combiner,
keys=parent.operation_def.keys,
label=operation_def.label))
class _PackAccumulateCombineVisitor(_ValidationVisitor):
r"""A visitor that packs combine nodes in the graph.
This visitor takes the grouped combines and performs the packing of those
combines.
  Before packing:
          GrandParentNode
           /           \
  ExtractFromDict1    ExtractFromDict2
          |                  |
      Combine1           Combine2
  After packing:
          GrandParentNode
                |
          PackedCombine
           /           \
  ExtractFromDict1'   ExtractFromDict2'
  The ExtractFromDict nodes after packing extract the accumulator corresponding
to the individual combines.
"""
def __init__(self, packable_combines):
super().__init__()
self._packable_combines = packable_combines
self._combine_to_grand_parent = {}
for grand_parent_label, group in self._packable_combines.items():
for combine_op in group:
self._combine_to_grand_parent[combine_op.label] = grand_parent_label
# Cache the packed combine node.
# Grand parent node label -> Packed combine node
self._packed_combine_cache = {}
def visit(self, operation_def, input_values):
self.validate_operation_def(operation_def)
# If we see a combine node which can be packed, create the packed combine
# node and cache it as we will use the same packed node for all the combines
# in the group.
if operation_def.label in self._combine_to_grand_parent:
return self._get_packed_combine(operation_def, input_values)
return nodes.OperationNode(operation_def, input_values).outputs
def _get_packed_combine(self, operation_def, input_values):
grand_parent_label = self._combine_to_grand_parent[operation_def.label]
# If we are seeing a combine from a group for the first time, create the
# the packed combine node and cache it.
if grand_parent_label not in self._packed_combine_cache:
# Get the grand parent node of the CacheableCombineAccumulate node.
# We will make this node as the parent of the
# PackedCombineAccumulate node.
assert len(input_values) == 1
parent_node = input_values[0]
assert isinstance(parent_node.parent_operation.operation_def,
beam_nodes.ExtractFromDict)
assert len(parent_node.parent_operation.inputs) == 1
grand_parent_node = parent_node.parent_operation.inputs[0]
assert (grand_parent_node.parent_operation.operation_def.label ==
grand_parent_label)
self._packed_combine_cache[grand_parent_label] = (
nodes.apply_operation(
analyzer_nodes.PackedCombineAccumulate,
grand_parent_node,
combiners=self._packable_combines[grand_parent_label],
label='PackedCombineAccumulate[{}]'.format(grand_parent_label)))
# For the current combine, create the ExtractFromDict node which
# extracts the accumulator corresponding to this combine from the
# packed combine output.
result = nodes.apply_operation(
beam_nodes.ExtractFromDict,
self._packed_combine_cache[grand_parent_label],
keys=operation_def.label, label=operation_def.label)
return (result,)
_COMBINE_PARENT_NODE_TYPES = (
beam_nodes.ExtractFromDict, beam_nodes.Flatten, analyzer_nodes.DecodeCache)
class _InspectMergeCombineVisitor(_ValidationVisitor):
"""A visitor that inspects the graph and looks for merge combine nodes."""
def __init__(self):
super().__init__()
# Gather all the packable merge combines.
# Dict {ExtractCombineMergeOutputs (child of CacheableCombineMerge) label:
# _CombinerOpWrapper}
self.packable_combine_extract_outputs = collections.OrderedDict()
def visit(self, operation_def, input_values):
self.validate_operation_def(operation_def)
self._maybe_add_packable_combine(operation_def, input_values)
return nodes.OperationNode(operation_def, input_values).outputs
def _maybe_add_packable_combine(self, operation_def, input_values):
if not isinstance(operation_def, analyzer_nodes.ExtractCombineMergeOutputs):
return
# Verify we have a CacheableCombineMerge parent.
parent = input_values[0].parent_operation
if not isinstance(parent.operation_def,
analyzer_nodes.CacheableCombineMerge):
return
assert len(parent.inputs) == 1
grand_parent = parent.inputs[0].parent_operation
# We look for packable combines. Specifically, CacheableCombineMerge nodes
# whose parent is one of the type in _COMBINE_PARENT_NODE_TYPES.
if isinstance(grand_parent.operation_def, _COMBINE_PARENT_NODE_TYPES):
# This is a packable combine.
self.packable_combine_extract_outputs[operation_def.label] = (
_CombinerOpWrapper(
combiner=parent.operation_def.combiner,
keys=(parent.operation_def.label,),
label=parent.operation_def.label))
class _PackMergeCombineVisitor(_ValidationVisitor):
r"""A visitor that inspects the graph and looks for combine nodes.
This visitor takes the grouped combines and performs the packing of those
combines.
  Before packing:
      ...                            ...
       |                              |
    Combine1                       Combine2
       |                              |
  ExtractCombineMergeOutputs1    ExtractCombineMergeOutputs2
  After packing:
      ...                            ...
       |                              |
    AddKey1                        AddKey2
        \                            /
         \                          /
                 Flatten
                    |
            PackedCombineMerge
             /              \
  ExtractFromDict1        ExtractFromDict2
          |                      |
  ExtractPackedCombineMergeOutputs1    ExtractPackedCombineMergeOutputs2
Since the inputs to the final flatten node before the packed merge come from
different paths, we add redundant flatten and packed merge nodes each time we
visit a new input of the final flatten node. At the end of this traversal,
we would have one final packed merge node with a corresponding flatten node
having all the needed inputs, and in addition to this we would have a set of
redundant packed merge and flatten nodes which needs to be removed.
"""
def __init__(self, packable_combine_extract_outputs):
super().__init__()
self._packable_combine_extract_outputs = packable_combine_extract_outputs
# Gather all the input nodes that we need to flatten to be passed as input
# to the packed merge node.
self._flatten_inputs = []
# Keep track of the label of the final packed merge combine node.
self.final_packed_merge_combine_label = None
def visit(self, operation_def, input_values):
self.validate_operation_def(operation_def)
# We look for the ExtractOutputs node of packable combines
if operation_def.label in self._packable_combine_extract_outputs:
return self._add_flatten_placeholder(operation_def, input_values)
return nodes.OperationNode(operation_def, input_values).outputs
def _add_flatten_placeholder(self, operation_def, input_values):
assert isinstance(operation_def, analyzer_nodes.ExtractCombineMergeOutputs)
parent = input_values[0].parent_operation
assert isinstance(parent.operation_def,
analyzer_nodes.CacheableCombineMerge)
packed_combine = self._get_packed_combine(
parent.operation_def, parent.inputs)
# For the current combine, create the ExtractFromDict node which
# extracts the accumulator corresponding to this combine from the
# packed combine output.
extract_dict_node = nodes.apply_operation(
beam_nodes.ExtractFromDict,
packed_combine,
keys=parent.operation_def.label,
label='ExtractFromDict[{}]'.format(parent.operation_def.label))
# Create the new ExtractPackedCombineMergeOutputs node.
return nodes.apply_multi_output_operation(
analyzer_nodes.ExtractPackedCombineMergeOutputs,
extract_dict_node,
output_tensor_info_list=operation_def.output_tensor_infos,
label='ExtractPackedCombineMergeOutputs[{}]'.format(
parent.operation_def.label)
)
def _get_packed_combine(self, operation_def, input_values):
for value in input_values:
keyed_value = nodes.apply_operation(
analyzer_nodes.AddKey,
value,
key=operation_def.label,
label='AddKey[{}]'.format(operation_def.label))
self._flatten_inputs.append(keyed_value)
# TODO(b/134414978): When we add support for multi-phase merge packing,
# add phase number to the flatten and packed combine labels.
flatten_label = 'FlattenInputForPackedCombineMerge[{}]'.format(
len(self._flatten_inputs))
flatten_node = nodes.apply_operation(
beam_nodes.Flatten, *self._flatten_inputs, label=flatten_label)
packed_combine_label = 'PackedCombineMerge[{}]'.format(
len(self._flatten_inputs))
packed_combine = nodes.apply_operation(
analyzer_nodes.PackedCombineMerge,
flatten_node,
combiners=list(self._packable_combine_extract_outputs.values()),
label=packed_combine_label)
self.final_packed_merge_combine_label = packed_combine_label
return packed_combine
_TensorBindingInfo = tfx_namedtuple.namedtuple('_TensorBindingInfo', [
'extract_from_dict_op_def', 'extract_outputs_op_def',
'tensor_binding_op_def', 'output_index'
])
class _RemoveRedundantPackedMergeCombineVisitor(_ValidationVisitor):
"""A visitor that inspects the graph and removes redundant merge nodes.
This visitor removes the redundant flatten and packed merge nodes added
by the _PackMergeCombineVisitor and reconstructs the descendants of the
removed nodes with the final flatten and packed merge node.
"""
def __init__(self, final_packed_merge_combine_label):
super().__init__()
self._final_packed_merge_combine_label = final_packed_merge_combine_label
def visit(self, operation_def, input_values):
self.validate_operation_def(operation_def)
if input_values and isinstance(operation_def, beam_nodes.CreateSavedModel):
# This will only be called once since this is a single phase analysis
# graph and in that case only the final CreateSavedModel node has inputs.
return self._remove_redundant_nodes(operation_def, input_values)
return nodes.OperationNode(operation_def, input_values).outputs
def _remove_redundant_nodes(self, operation_def, input_values):
# Input values to be used as input to CreateSavedModel.
# Since some of the input values are generated from the redundant nodes,
# those needs to be reconstructed with the final packed merge node.
reconstructed_input_values = []
redundant_values, non_redundant_values = (
self._get_redundant_and_non_redundant_input_values(input_values))
# Keep track of the final packed merge combine node. For those input nodes
# which are descendants of the redundant nodes, we would create a new node
# generated from the final packed merge combine node.
(final_packed_merge_combine, final_packed_merge_combine_tensor_bindings) = (
self._get_final_packed_combine_and_tensor_bindings(redundant_values))
reconstructed_input_values.extend(
final_packed_merge_combine_tensor_bindings)
# Add the non-redundant nodes to the input values.
reconstructed_input_values.extend(non_redundant_values)
# Keep track of the info needed to reconstruct the descendents of the
# redundant nodes.
to_be_created_tensor_bindings = (
self._get_to_be_created_tensor_bindings_info(redundant_values))
reconstructed_input_values.extend(self._create_tensor_bindings(
to_be_created_tensor_bindings, final_packed_merge_combine))
assert len(input_values) == len(reconstructed_input_values)
return nodes.OperationNode(
operation_def, tuple(reconstructed_input_values)).outputs
def _get_redundant_and_non_redundant_input_values(
self, input_values):
redundant_values, non_redundant_values = [], []
for value in input_values:
assert isinstance(value.parent_operation.operation_def,
beam_nodes.CreateTensorBinding)
extract_outputs = value.parent_operation.inputs[0]
# If its not from a packed combine node, this is a non-redundant value.
if not isinstance(extract_outputs.parent_operation.operation_def,
analyzer_nodes.ExtractPackedCombineMergeOutputs):
non_redundant_values.append(value)
else:
redundant_values.append(value)
return redundant_values, non_redundant_values
def _get_final_packed_combine_and_tensor_bindings(self, input_values):
final_packed_merge_combine = None
final_packed_merge_combine_tensor_bindings = []
for value in input_values:
extract_outputs = value.parent_operation.inputs[0]
# We have an input node generated from a packed combine merge.
extract_from_dict = extract_outputs.parent_operation.inputs[0]
packed_combine = extract_from_dict.parent_operation.inputs[0]
# If the input is generated from the final packed merge node, add it to
# the filtered inputs and keep track of the node for reconstruction of
# the other inputs.
if (packed_combine.parent_operation.operation_def.label ==
self._final_packed_merge_combine_label):
final_packed_merge_combine = packed_combine
final_packed_merge_combine_tensor_bindings.append(value)
return (final_packed_merge_combine,
final_packed_merge_combine_tensor_bindings)
def _get_to_be_created_tensor_bindings_info(self, input_values):
result = []
for value in input_values:
extract_outputs = value.parent_operation.inputs[0]
# We have an input node generated from a packed combine merge.
extract_from_dict = extract_outputs.parent_operation.inputs[0]
packed_combine = extract_from_dict.parent_operation.inputs[0]
# If the input is not generated from the final packed merge node, keep
# track of the node for reconstruction of the other inputs.
if (packed_combine.parent_operation.operation_def.label !=
self._final_packed_merge_combine_label):
# Store the info needed to reconstruct the input node.
result.append(_TensorBindingInfo(
extract_from_dict_op_def=
extract_from_dict.parent_operation.operation_def,
extract_outputs_op_def=
extract_outputs.parent_operation.operation_def,
tensor_binding_op_def=value.parent_operation.operation_def,
# Keep track of CreateTensorBinding node's input value index.
output_index=extract_outputs.value_index))
return result
def _create_tensor_bindings(self, to_be_created_tensor_bindings,
final_packed_merge_combine):
labels_to_new_nodes = {}
def _maybe_create_node(op_def, inputs):
if op_def.label in labels_to_new_nodes:
return labels_to_new_nodes[op_def.label]
new_node = nodes.OperationNode(op_def, inputs).outputs
labels_to_new_nodes[op_def.label] = new_node
return new_node
result = []
if to_be_created_tensor_bindings:
assert final_packed_merge_combine is not None
# Reconstruct the remaining inputs from the final packed merge node.
for tensor_binding_info in to_be_created_tensor_bindings:
extract_from_dict = _maybe_create_node(
tensor_binding_info.extract_from_dict_op_def,
(final_packed_merge_combine,))
extract_outputs = _maybe_create_node(
tensor_binding_info.extract_outputs_op_def,
extract_from_dict)
(tensor_binding,) = _maybe_create_node(
tensor_binding_info.tensor_binding_op_def,
(extract_outputs[tensor_binding_info.output_index],))
result.append(tensor_binding)
return result
def _update_cache_value_node_references(cache_value_nodes, traverser):
"""Updates value node references in the cache."""
if cache_value_nodes:
cache_value_nodes = {
key: traverser.visit_value_node(value_node)
for key, value_node in cache_value_nodes.items()
}
return cache_value_nodes
def perform_combiner_packing_optimization(saved_model_future,
cache_value_nodes, num_phases):
"""Optimizes the graph by packing possible combine nodes."""
# Inspect the graph to identify all the packable combines.
inspect_acc_combine_visitor = _InspectAccumulateCombineVisitor()
inspect_acc_combine_traverser = nodes.Traverser(inspect_acc_combine_visitor)
_ = inspect_acc_combine_traverser.visit_value_node(saved_model_future)
packable_combines = inspect_acc_combine_visitor.packable_combines
# Do not pack if we have only a single combine in the group.
packable_combines = {
label: group for label, group in packable_combines.items()
if len(group) > 1
}
pack_acc_combine_visitor = _PackAccumulateCombineVisitor(packable_combines)
pack_acc_combine_traverser = nodes.Traverser(pack_acc_combine_visitor)
saved_model_future = pack_acc_combine_traverser.visit_value_node(
saved_model_future)
# Replace cache nodes to point to the corresponding new nodes.
cache_value_nodes = _update_cache_value_node_references(
cache_value_nodes, pack_acc_combine_traverser)
# TODO(b/134414978): Consider also packing the merges even when we have
# multiple phases.
if num_phases > 1:
return (saved_model_future, cache_value_nodes)
# Identify the merge combines that can be packed together.
inspect_merge_combine_visitor = _InspectMergeCombineVisitor()
inspect_merge_combine_traverser = nodes.Traverser(
inspect_merge_combine_visitor)
_ = inspect_merge_combine_traverser.visit_value_node(saved_model_future)
# Only pack if we have more than one merge combines.
if len(inspect_merge_combine_visitor.packable_combine_extract_outputs) <= 1:
return (saved_model_future, cache_value_nodes)
# Add flatten and packed merge nodes.
pack_merge_combine_visitor = _PackMergeCombineVisitor(
packable_combine_extract_outputs=
inspect_merge_combine_visitor.packable_combine_extract_outputs)
pack_merge_combine_traverser = nodes.Traverser(pack_merge_combine_visitor)
saved_model_future = pack_merge_combine_traverser.visit_value_node(
saved_model_future)
# Replace cache nodes to point to the corresponding new nodes.
cache_value_nodes = _update_cache_value_node_references(
cache_value_nodes, pack_merge_combine_traverser)
# Remove redundant flatten and packed merge nodes.
remove_redundant_visitor = _RemoveRedundantPackedMergeCombineVisitor(
final_packed_merge_combine_label=
pack_merge_combine_visitor.final_packed_merge_combine_label)
remove_redundant_traverser = nodes.Traverser(remove_redundant_visitor)
saved_model_future = remove_redundant_traverser.visit_value_node(
saved_model_future)
# Replace cache nodes to point to the corresponding new nodes.
cache_value_nodes = _update_cache_value_node_references(
cache_value_nodes, remove_redundant_traverser)
return (saved_model_future, cache_value_nodes)
| [
"[email protected]"
] | |
49d39d44fc161e989c2e466a7903314ea706eff8 | fab7b6e422b74424fb59398635f74faca9ff5a58 | /waimak_extended_boundry/model_and_NSMC_build/pumping uncertainty.py | 402dbd2253b25d06ce083a921ee8614cb69809c8 | [] | no_license | hansonmcoombs/Waimakariri-Model-Ashley-to-Selwyn | c7a56a2ebd0d421c9679cb4a16ae319dfb2041b1 | c96c2663b010975ec08d42840fbc7970f3c2b085 | refs/heads/master | 2023-05-29T10:57:33.916912 | 2020-04-23T21:32:21 | 2020-04-23T21:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,168 | py | # -*- coding: utf-8 -*-
"""
Author: matth
Date Created: 11/08/2017 3:15 PM
"""
from __future__ import division
import numpy as np
import pandas as pd
from core.ecan_io import rd_sql, sql_db
mike = pd.read_hdf(r"P:\Groundwater\Waimakariri\Groundwater\Numerical GW model\supporting_data_for_scripts\ex_bd_va_sdp\m_ex_bd_inputs\sd_est_all_mon_vol.h5")
mike = mike.loc[(mike.time >= pd.datetime(2008, 1, 1)) & (mike.take_type == 'Take Groundwater')]
mike.loc[:, 'd_in_m'] = mike.time.dt.daysinmonth
data = mike.groupby('wap').aggregate({'usage_est': np.sum, 'mon_allo_m3': np.sum, 'crc': ','.join, 'd_in_m': np.sum})
data.loc[:, 'flux'] = data.loc[:, 'usage_est'] / (mike.time.max() - pd.datetime(2007, 12, 31)).days
data.loc[:, 'flux_cav'] = data.loc[:, 'mon_allo_m3'] / (mike.time.max() - pd.datetime(2007, 12, 31)).days
well_details = rd_sql(**sql_db.wells_db.well_details)
well_details = well_details.set_index('WELL_NO')
out_data = pd.merge(data, pd.DataFrame(well_details.loc[:, 'WMCRZone']), left_index=True, right_index=True)
out_data = out_data.loc[np.in1d(out_data.WMCRZone, [7, 8])]
temp = out_data.flux/out_data.flux_cav
temp2 = temp[temp<=1]
print 'done' | [
"[email protected]"
] | |
24e922c6f1f46bd3e4e246977f5c74031cbc8878 | a8da7aa38ea0b094329f9ad25e2ed1d41b712269 | /requests-html/code/basic.py | b1292fd7f8d7b26ec6d3f57c64887cf20994ed91 | [] | no_license | ikedaosushi/python-sandbox | ef04fbece2aad56975a3abef4cf58badb53f37c0 | d887b22f46635f053f9b5e3ea33c538058e4267c | refs/heads/master | 2021-11-23T17:44:06.296229 | 2020-07-26T08:56:12 | 2020-07-26T08:56:12 | 154,621,904 | 13 | 2 | null | 2021-11-16T22:15:17 | 2018-10-25T06:37:01 | Jupyter Notebook | UTF-8 | Python | false | false | 122 | py | from requests_html import HTMLSession
session = HTMLSession()
resp = session.get("https://www.python.jp/")
resp.html.url | [
"[email protected]"
] | |
cabba0c30aedd1e6ddf910614a74a65d4b90f2ce | 7066555f4c2ff9b405754d2e793b97bf04b6ab98 | /data_structure/arrays_and_strings/283_move_zeroes.py | 99d522304d65fe65ab51f1bbd92f5af51da2a6ee | [] | no_license | yangtao0304/hands-on-programming-exercise | c0d0fe324ffaf73c7b4c45aba721a245a8cc9ce2 | cc7740026c3774be21ab924b99ae7596ef20d0e4 | refs/heads/master | 2020-09-11T02:05:51.305196 | 2020-03-19T03:45:53 | 2020-03-19T03:45:53 | 221,904,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | class Solution(object):
def move_zeroes(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
i = 0
for idx in range(len(nums)):
if nums[idx] != 0:
nums[i] = nums[idx]
i += 1
for idx in range(i, len(nums)):
nums[idx] = 0
def move_zeroes_2(self, nums):
"""
:type nums: List[int]
:rtype: None Do not return anything, modify nums in-place instead.
"""
i = 0
for idx in range(len(nums)):
if nums[idx] != 0:
# swap
nums[i], nums[idx] = nums[idx], nums[i]
i += 1
| [
"[email protected]"
] | |
e8b238e8401c2b6ef7e6335fbc2bd0916d48419c | 32eeb97dff5b1bf18cf5be2926b70bb322e5c1bd | /benchmark/redreader/testcase/firstcases/testcase3_024.py | 271a75bb52d16f7f9a87961616e0f6992ce81b86 | [] | no_license | Prefest2018/Prefest | c374d0441d714fb90fca40226fe2875b41cf37fc | ac236987512889e822ea6686c5d2e5b66b295648 | refs/heads/master | 2021-12-09T19:36:24.554864 | 2021-12-06T12:46:14 | 2021-12-06T12:46:14 | 173,225,161 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,525 | py | #coding=utf-8
import os
import subprocess
import time
import traceback
from appium import webdriver
from appium.webdriver.common.touch_action import TouchAction
from selenium.common.exceptions import NoSuchElementException, WebDriverException
desired_caps = {
'platformName' : 'Android',
'deviceName' : 'Android Emulator',
'platformVersion' : '4.4',
'appPackage' : 'org.quantumbadger.redreader',
'appActivity' : 'org.quantumbadger.redreader.activities.MainActivity',
'resetKeyboard' : True,
'androidCoverage' : 'org.quantumbadger.redreader/org.quantumbadger.redreader.JacocoInstrumentation',
'noReset' : True
}
def command(cmd, timeout=5):
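    # Run a shell command in the background, give it `timeout` seconds, then terminate it.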
p = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
time.sleep(timeout)
p.terminate()
return
def getElememt(driver, str) :
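    # Retry the UiAutomator lookup a few times; as a last resort tap the screen
    # (to dismiss any overlay) and try once more, letting the final call raise.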
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str)
return element
def getElememtBack(driver, str1, str2) :
for i in range(0, 2, 1):
try:
element = driver.find_element_by_android_uiautomator(str1)
except NoSuchElementException:
time.sleep(1)
else:
return element
for i in range(0, 5, 1):
try:
element = driver.find_element_by_android_uiautomator(str2)
except NoSuchElementException:
time.sleep(1)
else:
return element
os.popen("adb shell input tap 50 50")
element = driver.find_element_by_android_uiautomator(str2)
return element
def swipe(driver, startxper, startyper, endxper, endyper) :
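    # Swipe between two points given as fractions of the screen size, retrying
    # once if the first attempt raises a WebDriverException.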
size = driver.get_window_size()
width = size["width"]
height = size["height"]
try:
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
except WebDriverException:
time.sleep(1)
driver.swipe(start_x=int(width * startxper), start_y=int(height * startyper), end_x=int(width * endxper),
end_y=int(height * endyper), duration=2000)
return
# testcase024
try :
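    # Scripted flow: open the 'books' subreddit, refresh the post list twice
    # (tap, then long-press), and navigate back via the action-bar back image.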
starttime = time.time()
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
element = getElememtBack(driver, "new UiSelector().text(\"books\")", "new UiSelector().className(\"android.widget.TextView\").instance(12)")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Refresh Posts\")")
TouchAction(driver).tap(element).perform()
element = getElememt(driver, "new UiSelector().className(\"android.widget.TextView\").description(\"Refresh Posts\")")
TouchAction(driver).long_press(element).release().perform()
element = getElememt(driver, "new UiSelector().resourceId(\"org.quantumbadger.redreader:id/actionbar_title_back_image\").className(\"android.widget.ImageView\")")
TouchAction(driver).tap(element).perform()
except Exception, e:
print 'FAIL'
print 'str(e):\t\t', str(e)
print 'repr(e):\t', repr(e)
print traceback.format_exc()
else:
print 'OK'
finally:
cpackage = driver.current_package
endtime = time.time()
print 'consumed time:', str(endtime - starttime), 's'
command("adb shell am broadcast -a com.example.pkg.END_EMMA --es name \"3_024\"")
jacocotime = time.time()
print 'jacoco time:', str(jacocotime - endtime), 's'
driver.quit()
if (cpackage != 'org.quantumbadger.redreader'):
cpackage = "adb shell am force-stop " + cpackage
os.popen(cpackage) | [
"[email protected]"
] | |
bb1b2964504bbee76a34c09f71ff3d2ff2ebd505 | c175c4e3560c6c66ec2b0c4b439cd586878b44a5 | /prplatform/submissions/tests/test_models.py | 3c2be9e4a42d51767fb62c1b5615a045a1c8072f | [
"MIT"
] | permissive | piehei/prplatform | fd30e2e388597583b9ef0e59462ea9643f7244ba | f3248b66019f207bb06a4681a62057e175408b3e | refs/heads/master | 2020-03-09T17:09:47.893706 | 2019-09-18T15:24:58 | 2019-09-18T15:24:58 | 128,902,940 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | from django.db import OperationalError
from django.test import TestCase
from prplatform.exercises.models import (
SubmissionExercise,
ReviewExercise,
)
from prplatform.submissions.models import (
OriginalSubmission,
ReviewSubmission,
)
from prplatform.submissions.reviewlock_models import ReviewLock
from prplatform.users.models import User
class ReviewExerciseTestCase(TestCase):
fixtures = [
'courses.yaml'
]
def setUp(self):
self.se = SubmissionExercise.objects.get(name='T1 TEXT')
self.course = self.se.course
self.re = ReviewExercise.objects.get(name='T1 TEXT REVIEW')
self.s1 = User.objects.get(username='student1')
self.s2 = User.objects.get(username='student2')
self.s3 = User.objects.get(username='student3')
def test_save_and_destroy_lock_reviewsubmission(self):
os = OriginalSubmission(course=self.course,
exercise=self.se,
submitter_user=self.s1,
text="jadajada")
os.save()
rl = ReviewLock(review_exercise=self.re,
user=self.s2,
original_submission=os)
rl.save()
rs = ReviewSubmission(course=self.course,
exercise=self.re,
submitter_user=self.s2,
reviewed_submission=os)
self.assertEqual(ReviewLock.objects.count(), 1)
rs.save_and_destroy_lock()
self.assertEqual(ReviewLock.objects.count(), 0)
rs2 = ReviewSubmission(course=self.course,
exercise=self.re,
submitter_user=self.s2,
reviewed_submission=os)
self.assertRaises(OperationalError,
rs2.save_and_destroy_lock)
rs2 = ReviewSubmission(course=self.course,
exercise=self.re,
submitter_user=self.s3,
reviewed_submission=os)
self.assertRaises(OperationalError,
rs2.save_and_destroy_lock)
| [
"[email protected]"
] | |
14b52891220f3fee7d733147a3f39618853e24d8 | 134178ca3575d30bc3314b2182cd1fc26ed0385f | /day2/ifs.py | c0b6b70bcd96f1bfe59ddbf964559e8cbef2a819 | [] | no_license | mpdevilleres/python-study-2021 | c19d50138158ccc6c18e96c5831546ce1ec03a0d | 36edc4711e0a39bc87eb84dd43b6ba058a726b20 | refs/heads/master | 2023-06-26T19:41:04.472116 | 2021-07-16T10:39:26 | 2021-07-16T10:39:26 | 378,050,476 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | # # defining if statements
#
# if (condition1):
# pass
# elif (condition2):
# pass
# else:
# pass
#
# # example
# if count == 1:
# print('count is not 0')
n = int(input("input a number: "))
if n % 2 == 0:
print(n, 'is divisible by 2')
elif n % 3 == 0:
print(n, 'is divisible by 3')
elif n % 5 == 0:
print(n, 'is divisible by 5')
elif n % 7 == 0:
    print(n, 'is divisible by 7')
else:
print(n, "undetermined")
# if it is divisible by 2
# if it is divisible by 3
# if it is divisible by 5
# if it is divisible by 7
| [
"[email protected]"
] | |
1c65de4d3f62ad7ca017c5b9b8c7e2fcc87ebe40 | eeb8f0d9d0b0413f945e57e3de119c3964fb6a89 | /epitools-env/lib/python3.8/site-packages/nltk/inference/nonmonotonic.py | 7dc92d724201bf595172cbd1faa75c947949c4ea | [] | no_license | Hillary05/EPITECH-DOCUMENTATION | 13ff301fa657ff9ffd55ef61e64647453eda7a8c | 4ea0bbef1d27003b7d5902cbdfdd41fbc9173b2c | refs/heads/master | 2023-05-10T12:43:09.237217 | 2021-06-29T11:41:05 | 2021-06-29T11:41:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,193 | py | # Natural Language Toolkit: Nonmonotonic Reasoning
#
# Author: Daniel H. Garrette <[email protected]>
#
# Copyright (C) 2001-2020 NLTK Project
# URL: <http://nltk.org>
# For license information, see LICENSE.TXT
"""
A module to perform nonmonotonic reasoning. The ideas and demonstrations in
this module are based on "Logical Foundations of Artificial Intelligence" by
Michael R. Genesereth and Nils J. Nilsson.
"""
from collections import defaultdict
from functools import reduce
from nltk.inference.prover9 import Prover9, Prover9Command
from nltk.sem.logic import (
VariableExpression,
EqualityExpression,
ApplicationExpression,
Expression,
AbstractVariableExpression,
AllExpression,
BooleanExpression,
NegatedExpression,
ExistsExpression,
Variable,
ImpExpression,
AndExpression,
unique_variable,
operator,
)
from nltk.inference.api import Prover, ProverCommandDecorator
class ProverParseError(Exception):
pass
def get_domain(goal, assumptions):
if goal is None:
all_expressions = assumptions
else:
all_expressions = assumptions + [-goal]
return reduce(operator.or_, (a.constants() for a in all_expressions), set())
class ClosedDomainProver(ProverCommandDecorator):
"""
This is a prover decorator that adds domain closure assumptions before
proving.
"""
def assumptions(self):
assumptions = [a for a in self._command.assumptions()]
goal = self._command.goal()
domain = get_domain(goal, assumptions)
return [self.replace_quants(ex, domain) for ex in assumptions]
def goal(self):
goal = self._command.goal()
domain = get_domain(goal, self._command.assumptions())
return self.replace_quants(goal, domain)
def replace_quants(self, ex, domain):
"""
Apply the closed domain assumption to the expression
- Domain = union([e.free()|e.constants() for e in all_expressions])
- translate "exists x.P" to "(z=d1 | z=d2 | ... ) & P.replace(x,z)" OR
"P.replace(x, d1) | P.replace(x, d2) | ..."
- translate "all x.P" to "P.replace(x, d1) & P.replace(x, d2) & ..."
:param ex: ``Expression``
:param domain: set of {Variable}s
:return: ``Expression``
"""
if isinstance(ex, AllExpression):
conjuncts = [
ex.term.replace(ex.variable, VariableExpression(d)) for d in domain
]
conjuncts = [self.replace_quants(c, domain) for c in conjuncts]
return reduce(lambda x, y: x & y, conjuncts)
elif isinstance(ex, BooleanExpression):
return ex.__class__(
self.replace_quants(ex.first, domain),
self.replace_quants(ex.second, domain),
)
elif isinstance(ex, NegatedExpression):
return -self.replace_quants(ex.term, domain)
elif isinstance(ex, ExistsExpression):
disjuncts = [
ex.term.replace(ex.variable, VariableExpression(d)) for d in domain
]
disjuncts = [self.replace_quants(d, domain) for d in disjuncts]
return reduce(lambda x, y: x | y, disjuncts)
else:
return ex
class UniqueNamesProver(ProverCommandDecorator):
"""
This is a prover decorator that adds unique names assumptions before
proving.
"""
def assumptions(self):
"""
- Domain = union([e.free()|e.constants() for e in all_expressions])
- if "d1 = d2" cannot be proven from the premises, then add "d1 != d2"
"""
assumptions = self._command.assumptions()
domain = list(get_domain(self._command.goal(), assumptions))
# build a dictionary of obvious equalities
eq_sets = SetHolder()
for a in assumptions:
if isinstance(a, EqualityExpression):
av = a.first.variable
bv = a.second.variable
# put 'a' and 'b' in the same set
eq_sets[av].add(bv)
new_assumptions = []
for i, a in enumerate(domain):
for b in domain[i + 1 :]:
# if a and b are not already in the same equality set
if b not in eq_sets[a]:
newEqEx = EqualityExpression(
VariableExpression(a), VariableExpression(b)
)
if Prover9().prove(newEqEx, assumptions):
# we can prove that the names are the same entity.
# remember that they are equal so we don't re-check.
eq_sets[a].add(b)
else:
# we can't prove it, so assume unique names
new_assumptions.append(-newEqEx)
return assumptions + new_assumptions
class SetHolder(list):
"""
A list of sets of Variables.
"""
def __getitem__(self, item):
"""
:param item: ``Variable``
:return: the set containing 'item'
"""
assert isinstance(item, Variable)
for s in self:
if item in s:
return s
# item is not found in any existing set. so create a new set
new = set([item])
self.append(new)
return new
class ClosedWorldProver(ProverCommandDecorator):
"""
This is a prover decorator that completes predicates before proving.
If the assumptions contain "P(A)", then "all x.(P(x) -> (x=A))" is the completion of "P".
If the assumptions contain "all x.(ostrich(x) -> bird(x))", then "all x.(bird(x) -> ostrich(x))" is the completion of "bird".
If the assumptions don't contain anything that are "P", then "all x.-P(x)" is the completion of "P".
walk(Socrates)
Socrates != Bill
+ all x.(walk(x) -> (x=Socrates))
----------------
-walk(Bill)
see(Socrates, John)
see(John, Mary)
Socrates != John
John != Mary
+ all x.all y.(see(x,y) -> ((x=Socrates & y=John) | (x=John & y=Mary)))
----------------
-see(Socrates, Mary)
all x.(ostrich(x) -> bird(x))
bird(Tweety)
-ostrich(Sam)
Sam != Tweety
+ all x.(bird(x) -> (ostrich(x) | x=Tweety))
+ all x.-ostrich(x)
-------------------
-bird(Sam)
"""
def assumptions(self):
assumptions = self._command.assumptions()
predicates = self._make_predicate_dict(assumptions)
new_assumptions = []
for p in predicates:
predHolder = predicates[p]
new_sig = self._make_unique_signature(predHolder)
new_sig_exs = [VariableExpression(v) for v in new_sig]
disjuncts = []
# Turn the signatures into disjuncts
for sig in predHolder.signatures:
equality_exs = []
for v1, v2 in zip(new_sig_exs, sig):
equality_exs.append(EqualityExpression(v1, v2))
disjuncts.append(reduce(lambda x, y: x & y, equality_exs))
# Turn the properties into disjuncts
for prop in predHolder.properties:
# replace variables from the signature with new sig variables
bindings = {}
for v1, v2 in zip(new_sig_exs, prop[0]):
bindings[v2] = v1
disjuncts.append(prop[1].substitute_bindings(bindings))
# make the assumption
if disjuncts:
# disjuncts exist, so make an implication
antecedent = self._make_antecedent(p, new_sig)
consequent = reduce(lambda x, y: x | y, disjuncts)
accum = ImpExpression(antecedent, consequent)
else:
# nothing has property 'p'
accum = NegatedExpression(self._make_antecedent(p, new_sig))
# quantify the implication
for new_sig_var in new_sig[::-1]:
accum = AllExpression(new_sig_var, accum)
new_assumptions.append(accum)
return assumptions + new_assumptions
def _make_unique_signature(self, predHolder):
"""
This method figures out how many arguments the predicate takes and
returns a tuple containing that number of unique variables.
"""
return tuple(unique_variable() for i in range(predHolder.signature_len))
def _make_antecedent(self, predicate, signature):
"""
Return an application expression with 'predicate' as the predicate
and 'signature' as the list of arguments.
"""
antecedent = predicate
for v in signature:
antecedent = antecedent(VariableExpression(v))
return antecedent
def _make_predicate_dict(self, assumptions):
"""
Create a dictionary of predicates from the assumptions.
:param assumptions: a list of ``Expression``s
:return: dict mapping ``AbstractVariableExpression`` to ``PredHolder``
"""
predicates = defaultdict(PredHolder)
for a in assumptions:
self._map_predicates(a, predicates)
return predicates
def _map_predicates(self, expression, predDict):
if isinstance(expression, ApplicationExpression):
func, args = expression.uncurry()
if isinstance(func, AbstractVariableExpression):
predDict[func].append_sig(tuple(args))
elif isinstance(expression, AndExpression):
self._map_predicates(expression.first, predDict)
self._map_predicates(expression.second, predDict)
elif isinstance(expression, AllExpression):
# collect all the universally quantified variables
sig = [expression.variable]
term = expression.term
while isinstance(term, AllExpression):
sig.append(term.variable)
term = term.term
if isinstance(term, ImpExpression):
if isinstance(term.first, ApplicationExpression) and isinstance(
term.second, ApplicationExpression
):
func1, args1 = term.first.uncurry()
func2, args2 = term.second.uncurry()
if (
isinstance(func1, AbstractVariableExpression)
and isinstance(func2, AbstractVariableExpression)
and sig == [v.variable for v in args1]
and sig == [v.variable for v in args2]
):
predDict[func2].append_prop((tuple(sig), term.first))
predDict[func1].validate_sig_len(sig)
class PredHolder(object):
"""
This class will be used by a dictionary that will store information
about predicates to be used by the ``ClosedWorldProver``.
The 'signatures' property is a list of tuples defining signatures for
    which the predicate is true. For instance, 'see(john, mary)' would
    result in the signature '(john,mary)' for 'see'.
    The 'properties' property is a list of pairs such that the first
element of the pair is a tuple of variables and the second element is an
expression of those variables that makes the predicate true. For instance,
'all x.all y.(see(x,y) -> know(x,y))' would result in "((x,y),('see(x,y)'))"
for 'know'.
"""
def __init__(self):
self.signatures = []
self.properties = []
self.signature_len = None
def append_sig(self, new_sig):
self.validate_sig_len(new_sig)
self.signatures.append(new_sig)
def append_prop(self, new_prop):
self.validate_sig_len(new_prop[0])
self.properties.append(new_prop)
def validate_sig_len(self, new_sig):
if self.signature_len is None:
self.signature_len = len(new_sig)
elif self.signature_len != len(new_sig):
raise Exception("Signature lengths do not match")
def __str__(self):
return "(%s,%s,%s)" % (self.signatures, self.properties, self.signature_len)
def __repr__(self):
return "%s" % self
def closed_domain_demo():
lexpr = Expression.fromstring
p1 = lexpr(r"exists x.walk(x)")
p2 = lexpr(r"man(Socrates)")
c = lexpr(r"walk(Socrates)")
prover = Prover9Command(c, [p1, p2])
print(prover.prove())
cdp = ClosedDomainProver(prover)
print("assumptions:")
for a in cdp.assumptions():
print(" ", a)
print("goal:", cdp.goal())
print(cdp.prove())
p1 = lexpr(r"exists x.walk(x)")
p2 = lexpr(r"man(Socrates)")
p3 = lexpr(r"-walk(Bill)")
c = lexpr(r"walk(Socrates)")
prover = Prover9Command(c, [p1, p2, p3])
print(prover.prove())
cdp = ClosedDomainProver(prover)
print("assumptions:")
for a in cdp.assumptions():
print(" ", a)
print("goal:", cdp.goal())
print(cdp.prove())
p1 = lexpr(r"exists x.walk(x)")
p2 = lexpr(r"man(Socrates)")
p3 = lexpr(r"-walk(Bill)")
c = lexpr(r"walk(Socrates)")
prover = Prover9Command(c, [p1, p2, p3])
print(prover.prove())
cdp = ClosedDomainProver(prover)
print("assumptions:")
for a in cdp.assumptions():
print(" ", a)
print("goal:", cdp.goal())
print(cdp.prove())
p1 = lexpr(r"walk(Socrates)")
p2 = lexpr(r"walk(Bill)")
c = lexpr(r"all x.walk(x)")
prover = Prover9Command(c, [p1, p2])
print(prover.prove())
cdp = ClosedDomainProver(prover)
print("assumptions:")
for a in cdp.assumptions():
print(" ", a)
print("goal:", cdp.goal())
print(cdp.prove())
p1 = lexpr(r"girl(mary)")
p2 = lexpr(r"dog(rover)")
p3 = lexpr(r"all x.(girl(x) -> -dog(x))")
p4 = lexpr(r"all x.(dog(x) -> -girl(x))")
p5 = lexpr(r"chase(mary, rover)")
c = lexpr(r"exists y.(dog(y) & all x.(girl(x) -> chase(x,y)))")
prover = Prover9Command(c, [p1, p2, p3, p4, p5])
print(prover.prove())
cdp = ClosedDomainProver(prover)
print("assumptions:")
for a in cdp.assumptions():
print(" ", a)
print("goal:", cdp.goal())
print(cdp.prove())
def unique_names_demo():
lexpr = Expression.fromstring
p1 = lexpr(r"man(Socrates)")
p2 = lexpr(r"man(Bill)")
c = lexpr(r"exists x.exists y.(x != y)")
prover = Prover9Command(c, [p1, p2])
print(prover.prove())
unp = UniqueNamesProver(prover)
print("assumptions:")
for a in unp.assumptions():
print(" ", a)
print("goal:", unp.goal())
print(unp.prove())
p1 = lexpr(r"all x.(walk(x) -> (x = Socrates))")
p2 = lexpr(r"Bill = William")
p3 = lexpr(r"Bill = Billy")
c = lexpr(r"-walk(William)")
prover = Prover9Command(c, [p1, p2, p3])
print(prover.prove())
unp = UniqueNamesProver(prover)
print("assumptions:")
for a in unp.assumptions():
print(" ", a)
print("goal:", unp.goal())
print(unp.prove())
def closed_world_demo():
lexpr = Expression.fromstring
p1 = lexpr(r"walk(Socrates)")
p2 = lexpr(r"(Socrates != Bill)")
c = lexpr(r"-walk(Bill)")
prover = Prover9Command(c, [p1, p2])
print(prover.prove())
cwp = ClosedWorldProver(prover)
print("assumptions:")
for a in cwp.assumptions():
print(" ", a)
print("goal:", cwp.goal())
print(cwp.prove())
p1 = lexpr(r"see(Socrates, John)")
p2 = lexpr(r"see(John, Mary)")
p3 = lexpr(r"(Socrates != John)")
p4 = lexpr(r"(John != Mary)")
c = lexpr(r"-see(Socrates, Mary)")
prover = Prover9Command(c, [p1, p2, p3, p4])
print(prover.prove())
cwp = ClosedWorldProver(prover)
print("assumptions:")
for a in cwp.assumptions():
print(" ", a)
print("goal:", cwp.goal())
print(cwp.prove())
p1 = lexpr(r"all x.(ostrich(x) -> bird(x))")
p2 = lexpr(r"bird(Tweety)")
p3 = lexpr(r"-ostrich(Sam)")
p4 = lexpr(r"Sam != Tweety")
c = lexpr(r"-bird(Sam)")
prover = Prover9Command(c, [p1, p2, p3, p4])
print(prover.prove())
cwp = ClosedWorldProver(prover)
print("assumptions:")
for a in cwp.assumptions():
print(" ", a)
print("goal:", cwp.goal())
print(cwp.prove())
def combination_prover_demo():
lexpr = Expression.fromstring
p1 = lexpr(r"see(Socrates, John)")
p2 = lexpr(r"see(John, Mary)")
c = lexpr(r"-see(Socrates, Mary)")
prover = Prover9Command(c, [p1, p2])
print(prover.prove())
command = ClosedDomainProver(UniqueNamesProver(ClosedWorldProver(prover)))
for a in command.assumptions():
print(a)
print(command.prove())
def default_reasoning_demo():
lexpr = Expression.fromstring
premises = []
# define taxonomy
premises.append(lexpr(r"all x.(elephant(x) -> animal(x))"))
premises.append(lexpr(r"all x.(bird(x) -> animal(x))"))
premises.append(lexpr(r"all x.(dove(x) -> bird(x))"))
premises.append(lexpr(r"all x.(ostrich(x) -> bird(x))"))
premises.append(lexpr(r"all x.(flying_ostrich(x) -> ostrich(x))"))
# default properties
premises.append(
lexpr(r"all x.((animal(x) & -Ab1(x)) -> -fly(x))")
) # normal animals don't fly
premises.append(
lexpr(r"all x.((bird(x) & -Ab2(x)) -> fly(x))")
) # normal birds fly
premises.append(
lexpr(r"all x.((ostrich(x) & -Ab3(x)) -> -fly(x))")
) # normal ostriches don't fly
# specify abnormal entities
premises.append(lexpr(r"all x.(bird(x) -> Ab1(x))")) # flight
premises.append(lexpr(r"all x.(ostrich(x) -> Ab2(x))")) # non-flying bird
premises.append(lexpr(r"all x.(flying_ostrich(x) -> Ab3(x))")) # flying ostrich
# define entities
premises.append(lexpr(r"elephant(E)"))
premises.append(lexpr(r"dove(D)"))
premises.append(lexpr(r"ostrich(O)"))
# print the assumptions
prover = Prover9Command(None, premises)
command = UniqueNamesProver(ClosedWorldProver(prover))
for a in command.assumptions():
print(a)
print_proof("-fly(E)", premises)
print_proof("fly(D)", premises)
print_proof("-fly(O)", premises)
def print_proof(goal, premises):
lexpr = Expression.fromstring
prover = Prover9Command(lexpr(goal), premises)
command = UniqueNamesProver(ClosedWorldProver(prover))
print(goal, prover.prove(), command.prove())
def demo():
closed_domain_demo()
unique_names_demo()
closed_world_demo()
combination_prover_demo()
default_reasoning_demo()
if __name__ == "__main__":
demo()
| [
"[email protected]"
] | |
167db560b7a88ff7a3f779a57cf322b7a9e0603d | 9028b6983685a3ace074049fccf2b8c503b77de8 | /PyStationB/libraries/ABEX/abex/simulations/plot_predicted_optimum_convergence.py | 4192c3ad9b51d68e2550387eb4284275c52ba4f0 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | mebristo/station-b-libraries | 7f5517e5e77e6cdc54c03355804b8c0a4fcae65b | 40bab526af6562653c42dbb32b174524c44ce2ba | refs/heads/main | 2023-09-03T03:54:53.181082 | 2021-10-01T03:21:11 | 2021-10-01T03:21:11 | 412,871,835 | 0 | 0 | MIT | 2021-10-02T17:53:07 | 2021-10-02T17:53:06 | null | UTF-8 | Python | false | false | 18,074 | py | # -------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# -------------------------------------------------------------------------------------------
import argparse
import logging
from pathlib import Path
from typing import List, Dict, Type, cast, Optional
import numpy as np
import pandas as pd
from abex.plotting.convergence_plotting import plot_objective_distribution_convergence
from abex.settings import OptimizerConfig, OptimizationStrategy, simple_load_config
from abex.simulations import SimulatorBase
from azureml.core import Run
from azureml.core.run import _OfflineRun
from matplotlib import pyplot as plt
from psbutils.psblogging import logging_to_stdout
def create_parser() -> argparse.ArgumentParser: # pragma: no cover
"""
Argument parser for plotting.
"""
parser = argparse.ArgumentParser(
description="Plot convergence over several iterations of Bayesian Optimization on a simulator by "
"plotting the distribution of the objective from the simulator at the optima predicted during the run."
)
parser.add_argument(
"--experiment_dirs",
type=Path,
action="append",
required=True,
help="A sequence of directories corresponding to different configurations for an experiment "
"(different runs). Each directory should contain multiple sub-directories with multiple sub-runs "
"corresponding to different random seeds. Those sub-directories should contain result directories "
"for each iteration of the optimisation algorithm.",
)
parser.add_argument(
"--experiment_labels",
type=str,
action="append",
choices=["acquisition", "batch_strategy", "optimization_strategy", "batch", "shrinking_factor", "hmc"],
)
parser.add_argument(
"--output_dir",
type=Path,
help="The resulting plot will be saved at this location.",
)
parser.add_argument("--title", type=str, default=None, help="The title for the plot.")
parser.add_argument(
"--max_batch_number",
type=int,
default=None,
help="Whether to clip the x-axis to a given number of batches.",
)
parser.add_argument(
"--output_scale",
type=str,
default=None,
choices=["symlog", "log", "linear"],
help="What scale to use for the objective on the plot. Default to log if all objective values are positive, "
"symlog otherwise.",
)
parser.add_argument(
"--styled_subset_param",
type=str,
default=[],
action="append",
help="Group experiments by given parameter(s) to distinguish them as related in plots. Currently only"
"acquisition, batch_strategy and optimization_strategy are accepted values.",
)
parser.add_argument(
"--style_category_name",
type=str,
default="Category",
help="Name to use on the legend for the style categories.",
)
parser.add_argument(
"--num_simulator_samples_per_optimum",
type=int,
default=1,
help="The number of objective samples to draw from the simulator at each suggested optimum location. "
"The higher the number, the more accurrate the plot of the distribution at suggested optimum will be.",
)
return parser
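# Column names and result-directory layout constants used when collecting
# the optima produced by each optimization run.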
BATCH_COLUMN = "Batch Number"
RUN_NAME_COLUMN = "Experiment Name"
SEED_COLUMN = "Sub-run Number"
OBJECTIVE_COLUMN = "Objective"
CONFIG_FILENAME = "config.yml"
ITERATION_DIR_PREFIX: str = "iter"
OPTIMA_FILE = "optima.csv"
def _extract_iteration_number(iteration_dir: Path) -> int: # pragma: no cover
"""Converts `iterXY` into integer `XY`."""
n = len(ITERATION_DIR_PREFIX)
iteration_dir_name = iteration_dir.stem
return int(iteration_dir_name[n:])
def load_optimum_file(
path: Path, input_names: List[str], iteration_number: int
) -> Dict[str, float]: # pragma: no cover
"""Reads the file with the location of the optimum
Args:
path: the CSV with the optimum
input_names: inputs which should be retained
iteration_number: iteration number, to be added to the returned dictionary
Returns:
dictionary which keys are `input_names` and `BATCH_COLUMN`.
Raises:
ValueError, if `path` contains more than one row (the optimum is not unique)
"""
# Read the CSV with optima and raise an error if it contains more than one data point
iteration_optimum: pd.DataFrame = pd.read_csv(path) # type: ignore # auto
if len(iteration_optimum) > 1:
raise ValueError
# Read the input values
point = {name: iteration_optimum[name].values[0] for name in input_names}
# Add the information about the iteration
point[BATCH_COLUMN] = iteration_number
return point # type: ignore # auto
def load_seeded_subrun_df(subrun_dir: Path, input_names: List[str]) -> pd.DataFrame: # pragma: no cover
"""Return a DataFrame with the optima suggested in this 'sub-run'. This function iterates over the result
directories starting with "iter" in this directory , and assumes the remaining part of the result directory
name indicates the iteration. Adds a column to the DataFrame to indicate batch number.
"""
iteration_dir_pattern: str = f"{ITERATION_DIR_PREFIX}*"
iteration_directories: List[Path] = list(subrun_dir.glob(iteration_dir_pattern))
iteration_directories = sorted(iteration_directories, key=lambda path: _extract_iteration_number(path))
# This is a list of optima (one for each iteration). Will be converted into a dataframe at the end.
proto_dataframe = []
# Now add the information about 1st, 2nd, ... iterations.
for iteration_directory in iteration_directories:
optimum_path = iteration_directory / OPTIMA_FILE
iteration_number = _extract_iteration_number(iteration_directory)
sample = load_optimum_file(path=optimum_path, input_names=input_names, iteration_number=iteration_number)
proto_dataframe.append(sample)
return pd.DataFrame(proto_dataframe)
def _get_input_names(experiment_dir: Path, loop_config: Type[OptimizerConfig]) -> List[str]: # pragma: no cover
"""Read the input names in an experiment.
Args:
experiment_dir: experiment directory
Returns:
list with input names, as read from the config
"""
config = load_config_from_expt_dir(experiment_dir, loop_config)
input_names = [name for name in config.data.input_names]
return input_names
def _get_seeded_runs(experiment_dir: Path) -> Dict[str, Path]: # pragma: no cover
"""
Returns:
a dictionary in the format `subrun_name: path to the subrun` (containing many iterations)
"""
subrun_paths = [child for child in experiment_dir.glob("*/seed*") if child.is_dir()]
subrun_names = [_path_subrun_name(path) for path in subrun_paths]
logging.info(f"experiment_dir is {experiment_dir} with subdirs {subrun_names}")
runs_dictionary = dict(zip(subrun_names, subrun_paths))
return runs_dictionary
def _path_subrun_name(path: Path) -> str: # pragma: no cover
"""
Returns a suitable name for a subrun located at .../selection_spec/seedNNN. If selection_spec
is "fixed", just return seedNNN; otherwise, prepend selection_spec.
    TODO: This function may be modified at a later stage, to return the name in a prettier format.
"""
parent_stem = path.parent.stem
if parent_stem == "fixed":
return path.stem
return f"{parent_stem}_{path.stem}"
def load_experiment_label(config: OptimizerConfig, experiment_label_params: List[str]) -> str:
# TODO: make experiment_label_params a list of enums and define a more sophisticated label. e.g. include HMC
"""
Create an experiment label based on options specified in the config file
Args:
config:
experiment_label_params: A list of argument names to append to experiment label
Returns: A string representing the label for one combination of acquisition plus batch strategy
plus optimization strategy.
E.g.
EXPECTED_IMPROVEMENT - LocalPenalization - Bayes
MEAN_PLUGIN_EXPECTED_IMPROVEMENT - LocalPenalization - Zoom(0.5)
"""
assert any([config.bayesopt, config.zoomopt])
optimization_strategy = config.optimization_strategy
experiment_label = ""
for label_param in experiment_label_params:
if len(experiment_label) > 0:
experiment_label += " "
# start with properties common to all optimization strategies
if label_param == "optimization_strategy":
experiment_label += f"{config.optimization_strategy}"
elif label_param == "hmc":
if config.training.hmc:
experiment_label += "hmc"
else:
# add properties specific to Bayesopt
if optimization_strategy == OptimizationStrategy.BAYESIAN:
if label_param == "acquisition":
experiment_label += f"{config.bayesopt.acquisition}"
elif label_param == "batch_strategy":
experiment_label += f"{config.bayesopt.batch_strategy.value}"
elif label_param == "batch":
experiment_label += f"batch{config.bayesopt.batch}"
# add properties specific to Zoomopt
elif optimization_strategy == OptimizationStrategy.ZOOM:
assert config.zoomopt is not None
if label_param == "shrinking_factor":
experiment_label += f"({config.zoomopt.shrinking_factor})"
elif label_param == "batch":
experiment_label += f"batch{config.zoomopt.batch}"
return experiment_label
def load_experiment_df(experiment_dir: Path, loop_config: Type[OptimizerConfig]) -> pd.DataFrame: # pragma: no cover
"""Return a DataFrame with accumulated observations from each sub-run in this directory. Each sub-directory
in the folder `experiment_dir` is assumed to correspond to a single optimization run (with possibly
different random seeds). Adds a column to the DataFrame to indicate sub-run ID (the ID is arbitrary).
"""
assert experiment_dir.exists(), f"A directory at {experiment_dir} must exist."
assert experiment_dir.is_dir(), f"A directory at {experiment_dir} must exist, but is not a directory."
# Get the input names
input_names: List[str] = _get_input_names(experiment_dir, loop_config)
# Get seeded runs
seeded_runs = _get_seeded_runs(experiment_dir)
experiment_dfs = []
for subrun_name, subrun_dir in seeded_runs.items():
subrun_df: pd.DataFrame = load_seeded_subrun_df(subrun_dir, input_names=input_names)
subrun_df[SEED_COLUMN] = subrun_name
experiment_dfs.append(subrun_df)
if len({len(one_seed_subrun_df) for one_seed_subrun_df in experiment_dfs}) != 1:
logging.warning(f"Not all subruns in {experiment_dir} have the same length.")
return pd.concat(experiment_dfs) # type: ignore
def validate_simulator_settings_same(configs: List[OptimizerConfig]) -> None: # pragma: no cover
"""
Validates that the fields corresponding to the simulator settings are the same in all configs for all
runs (experiments) being compared.
Args:
configs (List[OptimizerConfig]): List of configs for each run (experiment).
"""
assert len(configs) > 0
for config in configs:
for simulator_config_attr in config.get_consistent_simulator_fields():
if getattr(config, simulator_config_attr) != getattr(configs[0], simulator_config_attr):
raise ValueError(
f"The value of {simulator_config_attr} is different in experiment directories given. "
f"It's: {getattr(config, simulator_config_attr)} vs {getattr(configs[0], simulator_config_attr)}"
)
def load_config_from_expt_dir(experiment_dir: Path, loop_config: Type[OptimizerConfig]) -> OptimizerConfig:
"""
Locate a config file in experiment_dir or one of its subdirectories (for a per-seed config).
Config files are now normally in seed subdirectories, as they contain seed values.
"""
config_files = sorted(experiment_dir.glob(f"*/seed*/{CONFIG_FILENAME}")) or [experiment_dir / CONFIG_FILENAME]
config_file = config_files[0]
if not config_file.exists():
raise FileNotFoundError(f"Cannot find {CONFIG_FILENAME} at or under {experiment_dir}") # pragma: no cover
return cast(loop_config, simple_load_config(config_file, config_class=loop_config)) # type: ignore
def load_combined_df(
experiment_dirs: List[Path],
loop_config: Type[OptimizerConfig],
experiment_label_params: List[str],
styled_subset_params: List[str] = [],
) -> pd.DataFrame: # pragma: no cover
"""Return a DataFrame with observations from each run specified in run_dirs. The returned DataFrame
will have additional columns for: run name, sub-run id and batch number. Here, a sub-run is a single optimization
run/experiment where multiple batches are collected. A run is a collection of those sub-runs (with different
random initialisations) that share the same model/optimization configuration.
"""
dfs = []
for run_dir in experiment_dirs:
run_df: pd.DataFrame = load_experiment_df(run_dir, loop_config)
config = load_config_from_expt_dir(run_dir, loop_config)
run_name = load_experiment_label(config, experiment_label_params)
for styled_subset_param in styled_subset_params:
config_dict = config.dict()
# TODO: currently only acquisition, batch_strategy and optimization_strategy are allowed
# as styled_subset_params, hence indexing config by bayesopt makes sense, but this won't
# necessarily hold if this list is increased.
if styled_subset_param == "acquisition":
styled_subset_val = config_dict["bayesopt"][styled_subset_param]
elif styled_subset_param == "batch_strategy":
styled_subset_val = config_dict["bayesopt"][styled_subset_param].value
elif styled_subset_param == "optimization_strategy":
styled_subset_val = config_dict[styled_subset_param]
else:
raise ValueError(
f"styled_subset_param must be one of "
f"[acquisition, batch_strategy, optimization_strategy]."
f"Found {styled_subset_param}"
)
run_df[styled_subset_param] = styled_subset_val
run_df[RUN_NAME_COLUMN] = run_name
dfs.append(run_df)
return pd.concat(dfs) # type: ignore
def plot_predicted_optimum_covergence(
arg_list: Optional[List[str]], loop_config: Type[OptimizerConfig]
) -> None: # pragma: no cover
"""
Main entry point for plotting combined results.
:param arg_list: command-line arguments
:param loop_config: OptimizerConfig subclass that has been used
"""
args = create_parser().parse_args(arg_list)
logging_to_stdout()
styled_subset_params = args.styled_subset_param if len(args.styled_subset_param) > 0 else []
# Load the optima location data
combined_df = load_combined_df(
experiment_dirs=args.experiment_dirs,
loop_config=loop_config,
experiment_label_params=args.experiment_labels,
styled_subset_params=styled_subset_params,
)
# Clip the number of batches if max_batch_num specified
combined_df = (
combined_df[combined_df[BATCH_COLUMN] <= args.max_batch_number] if args.max_batch_number else combined_df
)
# Load the config (as given in run_loop_multiple_runs) for each experiment. Then assert that the
# parts corresponding to simulator are the same.
configs = [load_config_from_expt_dir(Path(experiment_dir), loop_config) for experiment_dir in args.experiment_dirs]
assert len(configs) > 0
# Validate all settings for the simulator are the same in all the configs:
validate_simulator_settings_same(configs) # type: ignore # auto
# Pick one config:
config = configs[0]
# Create a simulator from config
simulator = cast(SimulatorBase, config.get_simulator())
# Replicate each suggested optimum args.num_simulator_samples_per_optimum times.
combined_df_replicated = pd.DataFrame(
np.repeat(combined_df.values, args.num_simulator_samples_per_optimum, axis=0) # type: ignore # auto
) # type: ignore # auto
combined_df_replicated.columns = combined_df.columns
optima_locations_array = combined_df_replicated[simulator.parameter_space.parameter_names].to_numpy(
np.float64 # type: ignore
)
combined_df_replicated[OBJECTIVE_COLUMN] = simulator.sample_objective(optima_locations_array)
# Determine the output scale for the plot
if args.output_scale is not None:
output_scale = args.output_scale
else:
output_scale = "log" if (combined_df_replicated[OBJECTIVE_COLUMN] > 0).all() else "symlog" # type: ignore
fig, _ = plot_objective_distribution_convergence(
combined_df_replicated,
objective_col=OBJECTIVE_COLUMN,
batch_num_col=BATCH_COLUMN,
run_col=RUN_NAME_COLUMN,
style_cols=styled_subset_params,
yscale=output_scale,
)
assert fig is not None
# Possibly add title
if args.title: # type: ignore
fig.suptitle(args.title)
# Save the plot:
args.output_dir.mkdir(exist_ok=True)
output_path = args.output_dir / "styled_groups.pdf"
fig.savefig(output_path, bbox_inches="tight")
run = Run.get_context()
if not isinstance(run, _OfflineRun):
fig.tight_layout()
logging.info("Logging convergence plot to AML")
run.log_image(name="styled_groups", plot=plt)
plt.close(fig)
| [
"[email protected]"
] | |
61689e089f90433880de471c2b687b9a35801ef4 | cd8b429ba73017bd20d60b20e4d6dcf05ba44691 | /profiles/migrations/0003_profile_location.py | aef6ac69739be6cf62ea6bfa014aece7716dbe64 | [] | no_license | henrymbuguak/E-commerce-Site-Created-Using-Django-1.11.1 | 61d45f1f6861b9b8d308519660f2719d5d0e7b4e | 327f6faa7fe8d13e9dad913b5b9f90884d77fbdd | refs/heads/master | 2021-11-29T00:28:02.300796 | 2021-11-26T16:48:20 | 2021-11-26T16:48:20 | 93,396,154 | 16 | 17 | null | 2021-11-26T16:48:21 | 2017-06-05T11:16:58 | Python | UTF-8 | Python | false | false | 480 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-06-03 18:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('profiles', '0002_profile_description'),
]
operations = [
migrations.AddField(
model_name='profile',
name='location',
field=models.CharField(default='Nairobi,Kenya', max_length=120),
),
]
| [
"[email protected]"
] | |
832eba339e037d20014f155348e0ebee2b4ace38 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_1/dngmon002/question1.py | 440db247854a75f0996c65246e23cce55cd49087 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 328 | py | # Fancy art output
# Monwabisi Dingane
# 25 February 2014
print(" ____ ____ ___ ____ _____ _ _ _ _ _ ")
print(" / ___/ ___|_ _/ ___|| ___| | | | \ | | |")
print("| | \___ \| |\___ \| |_ | | | | \| | |")
print("| |___ ___) | | ___) | _| | |_| | |\ |_|")
print(" \____|____/___|____/|_| \___/|_| \_(_)")
| [
"[email protected]"
] | |
83745ed9a8e0c07cda36512d74784643936d8f65 | 2fd087fbc5faf43940153693823969df6c8ec665 | /pyc_decrypted/latest/pymac/dlls/FSEvent.py | 6fdf9e3df3d8a50f53eb9769bec23e32851ce298 | [] | no_license | mickeystone/DropBoxLibrarySRC | ed132bbffda7f47df172056845e5f8f6c07fb5de | 2e4a151caa88b48653f31a22cb207fff851b75f8 | refs/heads/master | 2021-05-27T05:02:30.255399 | 2013-08-27T13:16:55 | 2013-08-27T13:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,564 | py | #Embedded file name: pymac/dlls/FSEvent.py
from __future__ import absolute_import
from ctypes import POINTER, c_ubyte
from ..lazydll import FakeDLL
from ..lazyframework import LazyFramework
from ..types import CFAllocatorRef, CFArrayRef, CFUUIDRef, CFRunLoopRef, CFStringRef, CFTimeInterval, dev_t, FSEventStreamCallback, FSEventStreamContext, FSEventStreamCreateFlags, FSEventStreamEventId, FSEventStreamRef
class LazyFSEvent(LazyFramework):
def __init__(self):
super(LazyFSEvent, self).__init__()
self._dllname = u'Foundation'
self._func_defs = {}
def F(name, ret = None, args = [], errcheck = None):
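            # Helper: record the return type and argument types for a framework function bound lazily.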
self._func_defs[name] = {'restype': ret,
'argtypes': args}
F('FSEventsCopyUUIDForDevice', CFUUIDRef, [dev_t])
F('FSEventStreamCreate', FSEventStreamRef, [CFAllocatorRef,
FSEventStreamCallback,
POINTER(FSEventStreamContext),
CFArrayRef,
FSEventStreamEventId,
CFTimeInterval,
FSEventStreamCreateFlags])
F('FSEventStreamGetLatestEventId', FSEventStreamEventId, [FSEventStreamRef])
F('FSEventsGetCurrentEventId', FSEventStreamEventId, None)
F('FSEventStreamStart', c_ubyte, [FSEventStreamRef])
F('FSEventStreamInvalidate', None, [FSEventStreamRef])
F('FSEventStreamRelease', None, [FSEventStreamRef])
F('FSEventStreamStop', None, [FSEventStreamRef])
F('FSEventStreamScheduleWithRunLoop', None, [FSEventStreamRef, CFRunLoopRef, CFStringRef])
FSEvent = FakeDLL(LazyFSEvent)
| [
"[email protected]"
] | |
bc90df29176cb40490d288bb8254b2327d3d0992 | dd770e697daddab20e09fbf8ce199c97ee540c37 | /bigtop-packages/src/charm/zookeeper/layer-zookeeper/actions/smoke-test | 64814629a162acdf459dab3899fc8a1978368d94 | [
"FreeBSD-DOC",
"MIT",
"DOC",
"Apache-2.0"
] | permissive | PKConsul/bigtop | 0e7b5133be17a2093c0d5279b000c60b67072a16 | 2f8311b184bf0c5d25756b098895e43b1dbc3c2e | refs/heads/master | 2021-01-20T02:08:29.012667 | 2017-04-22T17:44:30 | 2017-04-23T06:27:13 | 89,379,381 | 1 | 0 | null | 2017-04-25T15:53:29 | 2017-04-25T15:53:29 | null | UTF-8 | Python | false | false | 1,526 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('lib')
from charmhelpers.core import hookenv # noqa: E402
from charms.layer.apache_bigtop_base import Bigtop # noqa: E402
from charms.reactive import is_state # noqa: E402
def fail(msg, output=None):
if output:
hookenv.action_set({'output': output})
hookenv.action_fail(msg)
sys.exit()
if not is_state('zookeeper.started'):
fail('Charm is not yet ready to run the Bigtop smoke test(s)')
# Bigtop smoke test components
smoke_components = ['zookeeper']
bigtop = Bigtop()
result = bigtop.run_smoke_tests(smoke_components)
if result == 'success':
hookenv.action_set({'outcome': result})
else:
fail('{} smoke tests failed'.format(smoke_components), result)
| [
"[email protected]"
] | ||
76532f0bfd74859dc88b4d5b0f4c9a449a6a84e2 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/stacks_20200703102749.py | 12f8182c2862178934b96e70b251a7b34a1c4479 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # stacks - where the last item to be added is the first to be reversed
# reversing an array using stacks
# def stacks(arr):
# arr.append(6)
# arr.append(7)
# newArr = []
# for i in range(len(arr)):
# newArr.append(arr.pop())
# print(newArr)
# stacks([3,4,5])
# ========================================================
# Queue
from collections import deque
def Queue():
| [
"[email protected]"
] | |
8868615a32d3f22a1d4f5f08ac876bc011a65f74 | 18c9109e3e6dfea227b80e0a8ebc5e92cfa117d3 | /tests/unit/shared/test_containers.py | 38a022c659f4139352e210f735651c518af4df3e | [
"Apache-2.0"
] | permissive | Xilinx/pyxir | 9b0179da550471d251acd95c26e9bfe6f54502dd | 8ce8a385a155f3ffdd84ce61501ca870cfb4a905 | refs/heads/master | 2023-09-05T12:07:59.732179 | 2022-03-31T19:24:48 | 2022-03-31T19:24:48 | 265,640,658 | 34 | 23 | Apache-2.0 | 2022-05-29T08:05:58 | 2020-05-20T17:36:17 | Python | UTF-8 | Python | false | false | 2,077 | py | # Copyright 2020 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Module for testing the StrContainer and BytesContainerdata structure """
import unittest
import libpyxir as lpx
from pyxir.shared.container import StrContainer, BytesContainer
class TestStrContainer(unittest.TestCase):
def test_constructor(self):
s = "test"
sc = StrContainer(s)
assert sc.get_str() == "test"
def test_eq(self):
s = "test"
sc = StrContainer(s)
assert sc == "test"
def test_set_str(self):
s = "test"
sc = StrContainer(s)
sc.set_str("2")
assert sc == "2"
assert sc.get_str() == "2"
class TestBytesContainer(unittest.TestCase):
def test_constructor(self):
b = b"test"
bc = BytesContainer(b)
assert isinstance(bc.get_bytes(), bytes)
assert bc.get_bytes() == b"test"
assert bc.get_bytes() != "test"
b2 = "test".encode('latin1')
bc2 = BytesContainer(b2)
        assert bc2.get_bytes() == "test".encode('latin1')
def test_eq(self):
b = b"test"
bc = BytesContainer(b)
assert bc == b"test"
def test_set_bytes(self):
b = b"test"
bc = BytesContainer(b)
bc.set_bytes(b"2")
assert bc == b"2"
assert bc.get_bytes() == b"2"
def test_set_bytes_latin1(self):
b = b"test"
bc = BytesContainer(b)
bc.set_bytes("2".encode('latin1'))
assert bc == "2".encode('latin1')
assert bc.get_bytes() == "2".encode('latin1')
| [
"[email protected]"
] | |
69f15ffcdc89289d39337e9f2cbdc77eeb439882 | 3e1fcf34eae508a3f3d4668edfb334069a88db3d | /tests/test_case_info.py | f9f4bb37beb8939a3a6647694a29a2215398d96f | [
"ISC"
] | permissive | mscarey/court-scraper | 26d32cb7354b05bb5d5d27a55bf4042e5dde1a4d | e29135331526a11aa5eb0445a9223fc3f7630895 | refs/heads/main | 2023-07-14T20:23:33.488766 | 2020-08-31T14:02:19 | 2020-08-31T14:02:19 | 384,977,976 | 0 | 0 | ISC | 2021-07-11T15:04:57 | 2021-07-11T15:04:57 | null | UTF-8 | Python | false | false | 788 | py | from court_scraper.case_info import CaseInfo
def test_attribute_mapping():
mapping = { 'case_num': 'number', }
data = { 'foo': 'bar', 'case_num': '1' }
CaseInfo._map = mapping
ci = CaseInfo(data)
assert hasattr(ci, 'case_num') == False
assert ci.number == '1'
assert ci.foo == 'bar'
def test_standardized_data():
mapping = {
'case_num': 'number',
}
data = {
'place_id': 'ga_dekalb',
'case_num': '1',
'status': 'Open',
'foo': 'bar',
}
# Number should be standardized,
# and foo should not appear
expected = {
'place_id': 'ga_dekalb',
'number': '1',
'status': 'Open',
}
CaseInfo._map = mapping
ci = CaseInfo(data)
assert ci.standard_data == expected
| [
"[email protected]"
] | |
95ef73ae86b57acc18a1491c332ea73babf5daf3 | b924079a344e718f1de3dccdae8064c8c24be373 | /quantum/service.py | 48ef432e2ae732b5fea5389b6ee75bad647545e8 | [
"Apache-2.0"
] | permissive | ruijie/quantum | b24a14636a00c2363e1f2f365f41b58f6a5f1c07 | b63a721785801a3b6f0aeb10bb2eb49b76323496 | refs/heads/master | 2021-01-20T05:08:35.704182 | 2012-11-12T08:33:23 | 2012-11-12T08:33:23 | 6,650,142 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,876 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from quantum.common import config
from quantum.openstack.common import cfg
from quantum import wsgi
LOG = logging.getLogger(__name__)
class WsgiService(object):
"""Base class for WSGI based services.
For each api you define, you must also define these flags:
:<api>_listen: The address on which to listen
:<api>_listen_port: The port on which to listen
"""
def __init__(self, app_name):
self.app_name = app_name
self.wsgi_app = None
def start(self):
self.wsgi_app = _run_wsgi(self.app_name)
def wait(self):
self.wsgi_app.wait()
class QuantumApiService(WsgiService):
"""Class for quantum-api service."""
@classmethod
def create(cls):
app_name = "quantum"
# Setup logging early, supplying both the CLI options and the
# configuration mapping from the config file
# We only update the conf dict for the verbose and debug
# flags. Everything else must be set up in the conf file...
# Log the options used when starting if we're in debug mode...
config.setup_logging(cfg.CONF)
LOG.debug("*" * 80)
LOG.debug("Configuration options gathered from config file:")
LOG.debug("================================================")
items = dict([(k, v) for k, v in cfg.CONF.items()
if k not in ('__file__', 'here')])
for key, value in sorted(items.items()):
LOG.debug("%(key)-30s %(value)s" % {'key': key,
'value': value,
})
LOG.debug("*" * 80)
service = cls(app_name)
return service
def serve_wsgi(cls):
try:
service = cls.create()
except Exception:
logging.exception('in WsgiService.create()')
raise
service.start()
return service
def _run_wsgi(app_name):
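    # Load the named paste application and serve it on the configured bind host/port.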
app = config.load_paste_app(app_name)
if not app:
LOG.error(_('No known API applications configured.'))
return
server = wsgi.Server("Quantum")
server.start(app, cfg.CONF.bind_port, cfg.CONF.bind_host)
return server
| [
"[email protected]"
] | |
1b063a5d5a6f4416bf8b1ac6fbcf2c18198492dd | f6c62c253a49678e368d074302ab0358190d2f05 | /CrossMgrCamera/ScaledImage.py | 50cd5f5111eb00c9f3dcb2a2221972b2522f4fb1 | [] | no_license | Adefx/CrossMgr | 4ab0563972dfb8de173dae1542b0c322aef7ab20 | ad803339c81994a784426164c20215a67fdbaba1 | refs/heads/master | 2021-01-17T08:24:52.641643 | 2017-02-24T14:05:50 | 2017-02-24T14:05:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,260 | py | import wx
contrastColour = wx.Colour( 255, 130, 0 )
def RescaleImage( image, width, height ):
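	# Scale the image to fit (width, height) while preserving aspect ratio; return the original when the scale factor is already close to 1.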
wImage = image.GetWidth()
hImage = image.GetHeight()
ratio = min( float(width) / float(wImage), float(height) / float(hImage) )
return image.Copy().Rescale( int(wImage*ratio), int(hImage*ratio), wx.IMAGE_QUALITY_NORMAL ) if not (0.94 < ratio < 1.06) else image
class ScaledImage( wx.Panel ):
def __init__( self, parent, id=wx.ID_ANY, size=(640,480), style=0, drawFinishLine=False ):
super(ScaledImage, self).__init__( parent, id, size=size, style=style )
self.SetBackgroundStyle( wx.BG_STYLE_CUSTOM )
self.image = None
self.drawFinishLine = drawFinishLine
self.Bind( wx.EVT_PAINT, self.OnPaint )
def OnPaint( self, event=None ):
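		# Clear the background, draw the rescaled image centred, and overlay a vertical finish line when requested.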
dc = wx.AutoBufferedPaintDC( self )
dc.SetBackground( wx.WHITE_BRUSH )
dc.Clear()
width, height = self.GetSizeTuple()
try:
bitmap = wx.BitmapFromImage( RescaleImage(self.image, width, height) )
except Exception as e:
return
dc.DrawBitmap( bitmap, max(0,(width - bitmap.GetWidth())//2), max(0,(height - bitmap.GetHeight())//2) )
if self.drawFinishLine:
dc.SetPen( wx.Pen(contrastColour, 1) )
dc.DrawLine( width//2, 0, width//2, height )
def SetImage( self, image ):
self.image = image
self.Refresh()
def GetImage( self ):
return self.image
def SetToEmpty( self ):
width, height = self.GetSize()
bitmap = wx.EmptyBitmapRGBA( width, height, 255, 255, 255, 0 )
self.image = wx.ImageFromBitmap( bitmap )
def SetTile( self, tile ):
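		# Tile the given bitmap across the full panel area and use the result as the displayed image.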
width, height = self.GetSize()
bitmap = wx.EmptyBitmap( width, height )
dc = wx.MemoryDC()
dc.SelectObject( bitmap )
wTile = tile.GetWidth()
hTile = tile.GetHeight()
for y in xrange( 0, height, hTile ):
for x in xrange( 0, width, wTile ):
dc.DrawBitmap( tile, x, y )
self.SetImage( bitmap.ConvertToImage() )
def SetTestImage( self ):
# Return a test image.
width, height = self.GetSize()
bitmap = wx.EmptyBitmap( width, height )
dc = wx.MemoryDC()
dc.SelectObject( bitmap )
colours = [(255,255,255), (255,0,0), (0,255,0), (0,0,255), (255,255,0), (255,0,255), (0,255,255), (0,0,0) ]
rWidth = int(float(width) / len(colours) + 0.5)
for y, hCur in ((0, height*0.75), (height*0.75, height*0.25)):
for i, c in enumerate(colours):
dc.SetBrush( wx.Brush(wx.Colour(*c), wx.SOLID) )
dc.DrawRectangle( rWidth * i, y, rWidth+1, hCur )
colours.reverse()
s = min(width, height) / 1.5
x = (width-s) / 2
y = (height-s) / 2
angle = 360.0 / len(colours)
for i, c in enumerate(colours):
dc.SetBrush( wx.Brush(wx.Colour(*c), wx.SOLID) )
dc.DrawEllipticArc(x, y, s, s, angle*i, angle*(i+1))
dc.SelectObject( wx.NullBitmap )
self.SetImage( bitmap.ConvertToImage() )
if __name__ == '__main__':
app = wx.App(False)
displayWidth, displayHeight = wx.GetDisplaySize()
imageWidth, imageHeight = 640, 480
if imageWidth*2 + 32 > displayWidth or imageHeight*2 + 32 > displayHeight:
imageWidth /= 2
imageHeight /= 2
mainWin = wx.Frame(None,title="ScaledImage", size=(imageWidth,imageHeight))
scaledImage = ScaledImage( mainWin, size=(imageWidth, imageHeight) )
scaledImage.SetTestImage()
# scaledImage.SetToEmpty()
mainWin.Show()
app.MainLoop()
| [
"[email protected]"
] | |
ac9076f960b12064af864a6c2ebfe3ba357e8c2d | 25dda94672497e3287a7403e283fb279ad171b79 | /SW Expert Academy/2806. N-Queen.py | b2fd14ecd8a8c0c0a85200aacc0298fd3c6d1edc | [] | no_license | woorud/Algorithm | c94b844e8c96a446c5fdee5c0abb159bfee384d7 | f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541 | refs/heads/master | 2023-02-23T13:53:28.645036 | 2021-01-29T12:24:23 | 2021-01-29T12:24:23 | 230,908,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | def diagnol(idx, c): # 대각선 위치 확인
for i in range(idx):
        if idx-i == abs(c-map[i]): # equal row and column distance means the two queens share a diagonal
return True
return False
def dfs(idx):
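    # place one queen per row; every placement that fills all N rows counts as a solution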
if idx == N:
global cnt
cnt += 1
return
for i in range(N):
if visited[i]:
continue
if diagnol(idx, i):
continue
visited[i] = 1
map[idx] = i
dfs(idx+1)
visited[i] = 0
t = int(input())
for _ in range(t):
N = int(input())
map = [0 for i in range(N)]
visited = [0 for i in range(N)]
cnt = 0
dfs(0)
print('#{} {}'.format(_+1, cnt)) | [
"[email protected]"
] | |
9f10259d22ebccc53fde33fe14816c7cb021cbe3 | dbb8b269f78f07ed81032b83d50149b506610176 | /Import SVG.py | 2e748b22940ef9229bd86f51d4565fa119848d8c | [] | no_license | READ/Glyphs-Scripts | 716be6e362ba61689fa3c5593b183b9d57d8d950 | 91c252425d1315ed81e840b794c819d7dc89d9bf | refs/heads/master | 2021-01-24T03:36:38.702571 | 2012-12-15T16:46:00 | 2012-12-15T16:46:00 | 7,224,823 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,049 | py | #MenuTitle: import SVG
# encoding: utf-8
"""
Import SVG.py
Created by Georg Seifert on 2010-11-28.
Copyright (c) 2010 schriftgestaltung.de. All rights reserved.
"""
from objectsGS import *
from vanilla.dialogs import getFile
from xml.dom import minidom
Bounds = None
def stringToFloatList(String):
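	# Split an SVG coordinate string on commas/spaces and return the tokens that parse as floats.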
points = String.replace(",", " ").strip(" ").split(" ")
newPoints = []
for value in points:
try:
value = float(value)
newPoints.append(value)
except:
pass
return newPoints
def drawSVGNode(pen, node, Transform):
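	# Recursively convert one SVG element (rect, circle, path, polygon or group) into pen drawing calls, flipping y so the SVG top-left origin maps to font coordinates.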
global Bounds
if node.localName:
if node.localName == "rect":
X = float(node.getAttribute('x'))
Y = Bounds[3] - float(node.getAttribute('y'))
W = float(node.getAttribute('width'))
H = float(node.getAttribute('height'))
pen.moveTo( Transform.transformPoint_((X, Y)) )
pen.lineTo( Transform.transformPoint_((X + W, Y)))
pen.lineTo( Transform.transformPoint_((X + W, Y - H)))
pen.lineTo( Transform.transformPoint_((X, Y - H)))
pen.closePath()
if node.localName == "circle":
CX = float(node.getAttribute('cx'))
CY = Bounds[3] - float(node.getAttribute('cy'))
R = float(node.getAttribute('r'))
pen.moveTo((CX, CY - R))
pen.curveTo( (CX + (R * 0.5523), CY - R), (CX + R, CY - (R * 0.5523)), (CX + R , CY) )
pen.curveTo( (CX + R, CY + (R * 0.5523)), (CX + (R * 0.5523), CY + R), (CX, CY + R) )
pen.curveTo( (CX - (R * 0.5523), CY + R), (CX - R, CY + (R * 0.5523)), (CX - R , CY) )
pen.curveTo( (CX - R, CY - (R * 0.5523)), (CX - (R * 0.5523), CY - R), (CX, CY - R) )
pen.closePath()
if node.localName == "path":
D = node.getAttribute('d')
parts = []
start = -1
length = -1
for i in range(len(D)):
if D[i] in ("C", "c", "L", "l", "M", "m", "s", "z", "H", "h", "V", "v"):
if start >= 0 and length > 0:
part = D[start:start+length]
part = part.replace(" ", ",")
part = part.replace("-", ",-")
part = part.replace(",,", ",")
parts.append(part)
start = i
length = 0
length += 1
if start >= 0 and length > 0:
part = D[start:start+length]
part = part.replace("-", ",-")
parts.append(part)
lastPoint = None
for part in parts:
if part[0] == "M":
point = points = stringToFloatList(part[1:])
assert(len(point) == 2)
point[1] = Bounds[3] - point[1]
pen.moveTo(Transform.transformPoint_(point))
lastPoint = point
elif part[0] == "m":
point = points = stringToFloatList(part[1:])
assert(len(point) == 2)
point[1] = - point[1]
point[0] += lastPoint[0]
point[1] += lastPoint[1]
pen.moveTo(Transform.transformPoint_(point))
lastPoint = point
elif part[0] == "C":
points = stringToFloatList(part[1:])
assert(len(points) == 6)
points = [float(Value) for Value in points]
for i in range(0, len(points), 6):
P1 = points[i:i+2]
P2 = points[i+2:i+4]
P3 = points[i+4:i+6]
P1[1] = Bounds[3] - P1[1]
P2[1] = Bounds[3] - P2[1]
P3[1] = Bounds[3] - P3[1]
pen.curveTo(Transform.transformPoint_(P1), Transform.transformPoint_(P2), Transform.transformPoint_(P3))
lastPoint = P3
elif part[0] == "c":
points = part[1:].strip(",").split(",")
points = [float(Value) for Value in points]
for i in range(0, len(points), 6):
P1 = points[i:i+2]
P2 = points[i+2:i+4]
P3 = points[i+4:i+6]
P1[0] += lastPoint[0]
P1[1] = -P1[1]
P1[1] += lastPoint[1]
P2[0] += lastPoint[0]
P2[1] = -P2[1]
P2[1] += lastPoint[1]
P3[0] += lastPoint[0]
P3[1] = -P3[1]
P3[1] += lastPoint[1]
pen.curveTo(Transform.transformPoint_(P1), Transform.transformPoint_(P2), Transform.transformPoint_(P3))
lastPoint = P3
elif part[0] == "S":
points = part[1:].strip(",").split(",")
points = [float(Value) for Value in points]
if (pen.contour[-1][1] == 'curve'):
P1 = list(pen.contour[-2][0])
P1[0] = lastPoint[0] - (P1[0] - lastPoint[0])
P1[1] = lastPoint[1] - (P1[1] - lastPoint[1])
else:
P1 = list(lastPoint)
P2 = points[0:2]
P3 = points[2:4]
P2[1] = Bounds[3] - P2[1]
P3[1] = Bounds[3] - P3[1]
pen.curveTo(Transform.transformPoint_(P1), Transform.transformPoint_(P2), Transform.transformPoint_(P3))
lastPoint = P3
elif part[0] == "s":
points = part[1:].strip(",").split(",")
points = [float(Value) for Value in points]
if (pen.contour[-1][1] == 'curve'):
P1 = list(pen.contour[-2][0])
P1[0] = lastPoint[0] - (P1[0] - lastPoint[0])
P1[1] = lastPoint[1] - (P1[1] - lastPoint[1])
else:
P1 = list(lastPoint)
P2 = points[0:2]
P3 = points[2:4]
P2[0] += lastPoint[0]
P2[1] = -P2[1]
P2[1] += lastPoint[1]
P3[0] += lastPoint[0]
P3[1] = -P3[1]
P3[1] += lastPoint[1]
pen.curveTo(Transform.transformPoint_(P1), Transform.transformPoint_(P2), Transform.transformPoint_(P3))
lastPoint = P3
elif part[0] == "L":
points = stringToFloatList(part[1:])
for i in range(0, len(points), 2):
points[i+1] = Bounds[3] - points[i+1]
pen.lineTo(points[i:i+2])
lastPoint = points[i:i+2]
elif part[0] == "l":
points = part[1:].strip(",").split(",")
points = [float(Value) for Value in points]
for i in range(0, len(points), 2):
points[i] += lastPoint[0]
points[i+1] = -points[i+1]
points[i+1] += lastPoint[1]
pen.lineTo(Transform.transformPoint_(points[i:i+2]))
lastPoint = points[i:i+2]
elif part[0] == "H":
point = float(part[1:].strip(","))
points = []
points.append(point)
points.append(lastPoint[1])
pen.lineTo(Transform.transformPoint_(points))
lastPoint = points
elif part[0] == "h":
point = float(part[1:].strip(","))
points = []
points.append(point + lastPoint[0])
points.append(lastPoint[1])
pen.lineTo(Transform.transformPoint_(points))
lastPoint = points
elif part[0] == "V":
point = float(part[1:].strip(","))
points = []
points.append(lastPoint[0])
points.append(Bounds[3] - point)
pen.lineTo(Transform.transformPoint_(points))
lastPoint = points
elif part[0] == "v":
point = float(part[1:].strip(","))
points = []
points.append(lastPoint[0])
points.append(-point + lastPoint[1])
pen.lineTo(Transform.transformPoint_(points))
lastPoint = points
elif part[0] == "z":
pen.closePath()
if parts[-1] != "z":
pen.endPath()
if node.localName == "polygon":
points = node.getAttribute('points').strip(" ")
points = points.split(" ")
point = points[0].split(",")
point = [float(Value) for Value in point]
point[1] = Bounds[3] - point[1]
pen.moveTo(point)
for i in range(1, len(points), 1):
point = stringToFloatList(points[i])
if len(point) == 2:
point[1] = Bounds[3] - point[1]
pen.lineTo(Transform.transformPoint_(point))
pen.closePath()
if node.localName == "g":
TransformString = node.getAttribute('transform')
Transform = NSAffineTransform.alloc().init()
if TransformString:
TransformStrings = TransformString.split(" ")
TransformStrings.reverse()
for TransformStringElement in TransformStrings:
if TransformStringElement.startswith("matrix"):
Values = stringToFloatList(TransformStringElement[7:-1])
Transform.setTransformStruct_(Values)
elif TransformStringElement.startswith("translate"):
Values = stringToFloatList(TransformStringElement[10:-1])
if len(Values) == 2:
Values[1] = Bounds[3] - Values[1]
Transform.translateXBy_yBy_(Values[0], Values[1])
else:
Transform.translateXBy_yBy_(Values[0], Values[0])
elif TransformStringElement.startswith("scale"):
Values = stringToFloatList(TransformStringElement[6:-1])
if len(Values) == 2:
Transform.scaleXBy_yBy_(Values[0], Values[1])
else:
Transform.scaleBy_(Values[0])
# else if ([TransformStringElement hasPrefix:@"rotate"]) {
#
# }
# else if ([TransformStringElement hasPrefix:@"skewX"]) {
#
# }
# else if ([TransformStringElement hasPrefix:@"skewY"]) {
#
# }
for subNode in node.childNodes:
drawSVGNode(pen, subNode, Transform)
def main():
global Bounds
g = CurrentGlyph()
print g
pen = g.getPen()
path = getFile(title="Please select a .svg", fileTypes=["svg"])
if path:
dom = minidom.parse(path[0])
SVG = dom.getElementsByTagName("svg")[0]
Bounds = SVG.getAttribute('viewBox').split(" ")
if (len(Bounds) == 4):
Bounds = [float(Value) for Value in Bounds]
else:
Width = SVG.getAttribute("width")
Height = SVG.getAttribute("height")
if Width and Height:
Bounds = [0, 0, float(Width), float(Height) ]
for node in dom.getElementsByTagName("svg")[0].childNodes:
drawSVGNode(pen, node, NSAffineTransform.alloc().init())
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
d2ac1e03d5df1efcebfca1377db95f6e07600de8 | 24d8cf871b092b2d60fc85d5320e1bc761a7cbe2 | /eXe/rev2283-2366/left-trunk-2366/twisted/test/myrebuilder2.py | 9eb92e11450989004c198b1d66a4b7c045cba6d0 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | joliebig/featurehouse_fstmerge_examples | af1b963537839d13e834f829cf51f8ad5e6ffe76 | 1a99c1788f0eb9f1e5d8c2ced3892d00cd9449ad | refs/heads/master | 2016-09-05T10:24:50.974902 | 2013-03-28T16:28:47 | 2013-03-28T16:28:47 | 9,080,611 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | class A:
def a(self):
return 'b'
try:
object
except NameError:
pass
else:
class B(A, object):
def b(self):
return 'c'
class Inherit(A):
def a(self):
return 'd'
| [
"[email protected]"
] | |
c46a875f4d21e9a5a2a243ec852c6a2644b74d29 | bc268abf22dc2a4888a81946eacf313487a86219 | /doc/conf.py | 6ba4298ffcefd8dff4fb6d30560f1f53081e50fa | [
"MIT"
] | permissive | Coderguy321/imbalanced-learn | 646a320c4bbc458a760ab548e00b86dc534477e4 | 032fa7c0cc46e6565d9afcb5279a5345542cccb9 | refs/heads/master | 2021-05-04T08:53:41.131939 | 2016-10-06T12:47:49 | 2016-10-06T12:47:49 | 70,373,945 | 1 | 0 | null | 2016-10-09T03:25:15 | 2016-10-09T03:25:15 | null | UTF-8 | Python | false | false | 10,006 | py | # -*- coding: utf-8 -*-
#
# imbalanced-learn documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 18 14:44:12 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ---------------------------------------------------
# Try to override the matplotlib configuration as early as possible
try:
import gen_rst
except:
pass
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'numpydoc',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx_gallery.gen_gallery',
'sphinx.ext.autosummary'
]
autosummary_generate = True
autodoc_default_flags = ['members', 'inherited-members']
sphinx_gallery_conf = {
# path to your examples scripts
'examples_dirs' : '../examples',
# path where to save gallery generated examples
'gallery_dirs' : 'auto_examples'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# Generate the plots for the gallery
plot_gallery = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'imbalanced-learn'
copyright = u'2016, G. Lemaitre, F. Nogueira, D. Oliveira, C. Aridas'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
__version__ = '0.2.0.dev0'
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'imbalanced-learndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'imbalanced-learn.tex', u'imbalanced-learn Documentation',
u'G. Lemaitre, F. Nogueira, D. Oliveira, C. Aridas', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'imbalanced-learn', u'imbalanced-learn Documentation',
[u'G. Lemaitre, F. Nogueira, D. Oliveira, C. Aridas'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'imbalanced-learn', u'imbalanced-learn Documentation',
u'G. Lemaitre, F. Nogueira, D. Oliveira, C. Aridas', 'imbalanced-learn', 'Toolbox for imbalanced dataset in machine learning.',
'Miscellaneous'),
]
def generate_example_rst(app, what, name, obj, options, lines):
# generate empty examples files, so that we don't get
# inclusion errors if there are no examples for a class / module
examples_path = os.path.join(app.srcdir, "modules", "generated",
"%s.examples" % name)
if not os.path.exists(examples_path):
# touch file
open(examples_path, 'w').close()
def setup(app):
app.connect('autodoc-process-docstring', generate_example_rst)
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| [
"[email protected]"
] | |
9d49c884c0df31778961a4fde18b09ec0e3aac9d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02420/s842100220.py | 389376fb975d7637a465bc9f05a0d09e6c60ebbb | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | while True:
first = input().rstrip()
if first == '-': break
m = int(input())
for _ in range(m):
h = int(input())
first = first[h:] + first[0:h]
print(first)
| [
"[email protected]"
] | |
1a08b0f2979c899b504f8fc87218e7d979d9d652 | 8255dcf7689c20283b5e75a452139e553b34ddf3 | /app/views/dashboard/media/photos.py | ae8b16add3758cab891abe9a8780cbfb5dd38862 | [
"MIT"
] | permissive | Wern-rm/raton.by | 09871eb4da628ff7b0d0b4415a150cf6c12c3e5a | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | refs/heads/main | 2023-05-06T02:26:58.980779 | 2021-05-25T14:09:47 | 2021-05-25T14:09:47 | 317,119,285 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,815 | py | from flask import render_template, redirect, url_for, request, current_app
from flask_login import login_required
from app import db, logger
from app.controllers.dashboard_controller import dashboard_controller
from app.forms.dashboard_media import MediaPhotos
from app.models.photo_catalogs import PhotoCatalogs
from app.models.photos import Photos
from app.utils.flask_upload_files import UploadFiles, IMAGES
from app.views.dashboard import bp
@bp.route('/media/photos/<int:catalog_id>', methods=['GET', 'POST'])
@login_required
@dashboard_controller
def media_photos(catalog_id: int, **kwargs):
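    # Dashboard view for one photo catalog: handles new image uploads and lists the existing photos.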
catalog = db.session.query(PhotoCatalogs).filter(PhotoCatalogs.id == catalog_id).first()
if not catalog:
return redirect(url_for('dashboard.media'))
form = MediaPhotos()
uploader = UploadFiles(basedir=current_app.config.get('STATIC_APP'), storage='uploads', extensions=IMAGES)
if form.validate_on_submit() and request.form['form-id'] == '1':
try:
filename = uploader.save(file=form.file.data)
file_url = uploader.get_path(filename=filename)
db.session.add(Photos(catalog_id=catalog_id, url=file_url))
db.session.commit()
return redirect(url_for('dashboard.media_photos', catalog_id=catalog_id, action='success', id=37))
except Exception as e:
db.session.rollback()
logger.error(e)
return redirect(url_for('dashboard.media_photos', catalog_id=catalog_id, action='warning', id=1))
kwargs['title'] = 'Управление медиа'
kwargs['data'] = db.session.query(Photos).filter(Photos.catalog_id == catalog_id).order_by(Photos.id).all()
kwargs['form'] = form
kwargs['catalog'] = catalog
return render_template("dashboard/media/photos.html", **kwargs) | [
"[email protected]"
] | |
07324d1a4440f3a9e0e462ca96ad5027842c622d | 84c9a6fb5e18741f14a55d0d737e2a556383770d | /venv/Scripts/easy_install-script.py | 3f672fec5a5c0e737faacd797657588a42ed2a5a | [] | no_license | AravindChan96/Vulcan | 638a1db2f84df08bc50dd76c7f142014d529fbec | 5548a6f36f04108ac1a6ed8e707930f9821f0bd9 | refs/heads/master | 2022-11-05T15:05:54.224578 | 2020-06-19T20:44:14 | 2020-06-19T20:44:14 | 273,396,348 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | #!C:\Users\aravi\PycharmProjects\VulnerabilityScanner\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
5911126de8a3316316c55a211087dfce6aca418a | 13d222bc3332378d433835914da26ed16b583c8b | /src/pemjh/challenge39/main.py | 8f4884cd82bb3bbb2d75381260a80f02e168258f | [] | no_license | mattjhussey/pemjh | c27a09bab09cd2ade31dc23fffac07374bea9366 | 2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99 | refs/heads/master | 2023-04-16T03:08:59.390698 | 2023-04-08T10:54:00 | 2023-04-08T10:54:00 | 204,912,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 894 | py | """ Challenge039 """
def number_of_perimeters(length):
"""
>>> number_of_perimeters(120)
3
"""
count = 0
# c > b >= a
# a + b > c
# a + b + c = n
# n = 100
# 1 <= a <= 33
# a <= b <= (n - a) / 2 + a
# b <= c <= n - a - b
limit = length // 3
if length % 3 != 0:
limit += 1
for a_length in range(1, limit):
b_limit = (length - a_length) // 2 + a_length
for b_length in range(a_length, b_limit):
c_length = length - a_length - b_length
if (a_length**2 + b_length**2) == (c_length**2):
count += 1
break
return count
def main():
""" challenge039 """
limit = 1000
results = [(number_of_perimeters(i), i)
for i in range(4, limit + 1, 2)]
return max(results, key=lambda i: i[0])[1]
| [
"[email protected]"
] | |
5bb3b06fa5b6ac7552d33eaa640de020e126f6c3 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_hampshire.py | 46f4aec748d11c63eb044ddd89f4c44dd82194b4 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py |
#class header
class _HAMPSHIRE():
def __init__(self,):
self.name = "HAMPSHIRE"
self.definitions = [u'a county (= area with its own local government) in southern England']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
94731e2c76a433376aed579f28bc5fc10c71f7c6 | 7e627a6f120f5e668d09f8b362c76f2592430a92 | /dictionaria/assets.py | de3831bff39e3ef48f68a6fa44076febf86ba784 | [
"Apache-2.0"
] | permissive | LimorNaaman/dictionaria | a541bb9d812d8f4b5fb340b9525f2d136d28a40f | 9f8a5139af99eb4ae1af9ed0b340120c486cf112 | refs/heads/master | 2020-03-15T12:05:25.162179 | 2018-06-05T18:01:49 | 2018-06-05T18:01:49 | 132,136,322 | 0 | 0 | Apache-2.0 | 2018-06-05T18:01:50 | 2018-05-04T12:16:53 | Mako | UTF-8 | Python | false | false | 282 | py | from clld.web.assets import environment
from clldutils.path import Path
import dictionaria
environment.append_path(
Path(dictionaria.__file__).parent.joinpath('static').as_posix(),
url='/dictionaria:static/')
environment.load_path = list(reversed(environment.load_path))
| [
"[email protected]"
] | |
06e1d46135ac73c4d98acecedfbc42f6b36f52fd | 892dd32ee0be7135cd33c875b06dcc66307dcc99 | /automation/MPTS/apikey_set_delegatedAdmin.py | ece407e17d0b344e8ac87658a5b870a8be88ba74 | [] | no_license | cloudbytestorage/devops | 6d21ed0afd752bdde8cefa448d4433b435493ffa | b18193b08ba3d6538277ba48253c29d6a96b0b4a | refs/heads/master | 2020-05-29T08:48:34.489204 | 2018-01-03T09:28:53 | 2018-01-03T09:28:53 | 68,889,307 | 4 | 8 | null | 2017-11-30T08:11:39 | 2016-09-22T05:53:44 | Python | UTF-8 | Python | false | false | 3,694 | py | import json
import requests
import md5
import fileinput
import sys
import time
from cbrequest import sendrequest, filesave, timetrack, queryAsyncJobResult, configFile, configFileName
config = configFile(sys.argv);
configfilename = configFileName(sys.argv);
stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
#######Generate Apikeys for Site Admin
for x in range(1, int(config['Number_of_SiteAdmins'])+1):
### List Users
querycommand = 'command=listUsers'
resp_listUsers = sendrequest(stdurl, querycommand)
filesave("logs/CurrentUsersList.txt", "w", resp_listUsers)
data = json.loads(resp_listUsers.text)
users = data["listusersresponse"]["user"]
user_id = ""
for user in users:
if "%s" %(config['siteAdminUsername%d' %(x)]) == user['account']:
user_id = user['id']
print user['account']
print user_id
m = md5.new()
m.update("%s" %(config['siteAdminPassword%d' %(x)]))
md5_site_pwd = m.hexdigest()
### Generate ApiKey
querystring = 'command=registerUserKeys&id=%s' %(user_id)
resp_registerUserKeys = sendrequest(stdurl, querystring)
filesave("logs/registerUserkeys.txt", "w", resp_registerUserKeys)
data = json.loads(resp_registerUserKeys.text)
#print data
try:
apikey = data["registeruserkeysresponse"]["userkeys"]["apikey"]
print "Current Apikey from Devman --- "+apikey
except:
print "Didnt get Apikey"
continue
existingApikey = "%s" %(config['siteAdminApikey%d' %(x)])
print "Existing API Key in Config File --- "+existingApikey
print "ConfigFilename %s" %(configfilename)
for line in fileinput.FileInput('%s' %(configfilename),inplace=1):
line = line.replace(existingApikey,apikey)
print line,
fileinput.close()
print "End of loop1"
#############Apikey Generated for Site Admin
#config = configFile(sys.argv);
#configfilename = configFileName(sys.argv);
#stdurl = 'https://%s/client/api?apikey=%s&response=%s&' %(config['host'], config['apikey'], config['response'])
#######Generate Apikeys for HA Admin
for x in range(1, int(config['Number_of_HAAdmins'])+1):
### List Users
querycommand = 'command=listUsers'
resp_listUsers = sendrequest(stdurl, querycommand)
filesave("logs/CurrentUsersList.txt", "w", resp_listUsers)
data = json.loads(resp_listUsers.text)
users = data["listusersresponse"]["user"]
user_id = ""
for user in users:
if "%s" %(config['haAdminUsername%d' %(x)]) == user['account']:
user_id = user['id']
print user['account']
print user_id
m = md5.new()
m.update("%s" %(config['haAdminPassword%d' %(x)]))
md5_ha_pwd = m.hexdigest()
### Generate ApiKey
querystring = 'command=registerUserKeys&id=%s' %(user_id)
resp_registerUserKeys = sendrequest(stdurl, querystring)
filesave("logs/registerUserkeys.txt", "w", resp_registerUserKeys)
data = json.loads(resp_registerUserKeys.text)
#print data
try:
hapikey = data["registeruserkeysresponse"]["userkeys"]["apikey"]
print "Current Apikey from Devman --- "+hapikey
except:
print "Didnt get Apikey"
continue
hexistingApikey = "%s" %(config['haAdminApikey%d' %(x)])
print "Existing API Key in Config File --- "+hexistingApikey
print "ConfigFilename for HA %s" %(configfilename)
for line in fileinput.FileInput('%s' %(configfilename),inplace=1):
line = line.replace(hexistingApikey,hapikey)
print line,
fileinput.close()
print "End of loop1"
#############Apikey Generated
| [
"[email protected]"
] | |
eeb702c8ed77e9a4ec9c69d8a372d33e8b0f6823 | c753216f44c4c5f34d50763a02d720e064ed5d13 | /OPSI/web2/test/test_server.py | 6a12fe7d22ef567ba7a8c4ea760a994618cda3a3 | [] | no_license | mpice-mn/python-opsi | 7fefcd590213a5b698022323b166710e8cbf5641 | 76dcd4e38100e019f64731539b31be6e8af60af7 | refs/heads/stable | 2023-05-02T05:25:31.478822 | 2020-02-05T21:16:50 | 2020-02-05T21:16:50 | 104,738,074 | 0 | 0 | null | 2017-09-25T10:49:13 | 2017-09-25T10:49:13 | null | UTF-8 | Python | false | false | 20,493 | py | """
A test harness for the OPSI.web2 server.
"""
from zope.interface import implements
from twisted.python import components
from OPSI.web2 import http, http_headers, iweb, server
from OPSI.web2 import resource, stream, compat
from twisted.trial import unittest, util
from twisted.internet import reactor, defer, address, error as ti_error
class NotResource(object):
"""
Class which does not implement IResource.
Used as an adaptee by L{AdaptionTestCase.test_registered} to test that
if an object which does not provide IResource is adapted to IResource
and there is an adapter to IResource registered, that adapter is used.
"""
class ResourceAdapter(object):
"""
Adapter to IResource.
Registered as an adapter from NotResource to IResource so that
L{AdaptionTestCase.test_registered} can test that such an adapter will
be used.
"""
implements(iweb.IResource)
def __init__(self, original):
pass
components.registerAdapter(ResourceAdapter, NotResource, iweb.IResource)
class NotOldResource(object):
"""
Class which does not implement IOldNevowResource or IResource.
Used as an adaptee by L{AdaptionTestCase.test_transitive} to test that
if an object which does not provide IResource or IOldNevowResource is
adapted to IResource and there is an adapter to IOldNevowResource
registered, first that adapter is used, then the included adapter from
IOldNevowResource to IResource is used.
"""
class OldResourceAdapter(object):
"""
Adapter to IOldNevowResource.
Registered as an adapter from NotOldResource to IOldNevowResource so
that L{AdaptionTestCase.test_transitive} can test that such an adapter
will be used to allow the initial input to be adapted to IResource.
"""
implements(iweb.IOldNevowResource)
def __init__(self, original):
pass
components.registerAdapter(OldResourceAdapter, NotOldResource, iweb.IOldNevowResource)
class AdaptionTestCase(unittest.TestCase):
"""
Test the adaption of various objects to IResource.
Necessary due to the special implementation of __call__ on IResource
which extends the behavior provided by the base Interface.__call__.
"""
def test_unadaptable(self):
"""
Test that attempting to adapt to IResource an object not adaptable
to IResource raises an exception or returns the specified alternate
object.
"""
class Unadaptable(object):
pass
self.assertRaises(TypeError, iweb.IResource, Unadaptable())
alternate = object()
self.assertIdentical(iweb.IResource(Unadaptable(), alternate), alternate)
def test_redundant(self):
"""
Test that the adaption to IResource of an object which provides
IResource returns the same object.
"""
class Resource(object):
implements(iweb.IResource)
resource = Resource()
self.assertIdentical(iweb.IResource(resource), resource)
def test_registered(self):
"""
Test that if an adapter exists which can provide IResource for an
object which does not provide it, that adapter is used.
"""
notResource = NotResource()
self.failUnless(isinstance(iweb.IResource(notResource), ResourceAdapter))
def test_oldResources(self):
"""
Test that providers of L{IOldNevowResource} can be adapted to
IResource automatically.
"""
class OldResource(object):
implements(iweb.IOldNevowResource)
oldResource = OldResource()
resource = iweb.IResource(oldResource)
self.failUnless(isinstance(resource, compat.OldNevowResourceAdapter))
def test_transitive(self):
"""
Test that a special-case transitive adaption from something to
IOldNevowResource to IResource is possible.
"""
notResource = NotOldResource()
resource = iweb.IResource(notResource)
self.failUnless(isinstance(resource, compat.OldNevowResourceAdapter))
class SimpleRequest(server.Request):
"""I can be used in cases where a Request object is necessary
    but it is beneficial to bypass the chanRequest
"""
clientproto = (1,1)
def __init__(self, site, method, uri, headers=None, content=None):
if not headers:
headers = http_headers.Headers(headers)
super(SimpleRequest, self).__init__(
site=site,
chanRequest=None,
command=method,
path=uri,
version=self.clientproto,
contentLength=len(content or ''),
headers=headers)
self.stream = stream.MemoryStream(content or '')
self.remoteAddr = address.IPv4Address('TCP', '127.0.0.1', 0)
self._parseURL()
self.host = 'localhost'
self.port = 8080
def writeResponse(self, response):
return response
class TestChanRequest:
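    """
    Fake IChanRequest implementation used to drive server.Request directly in
    tests; it records the response code, headers and body for assertions.
    """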
implements(iweb.IChanRequest)
hostInfo = address.IPv4Address('TCP', 'host', 80), False
remoteHost = address.IPv4Address('TCP', 'remotehost', 34567)
def __init__(self, site, method, prepath, uri, length=None,
headers=None, version=(1,1), content=None):
self.site = site
self.method = method
self.prepath = prepath
self.uri = uri
if headers is None:
headers = http_headers.Headers()
self.headers = headers
self.http_version = version
# Anything below here we do not pass as arguments
self.request = server.Request(self,
self.method,
self.uri,
self.http_version,
length,
self.headers,
site=self.site,
prepathuri=self.prepath)
if content is not None:
self.request.handleContentChunk(content)
self.request.handleContentComplete()
self.code = None
self.responseHeaders = None
self.data = ''
self.deferredFinish = defer.Deferred()
    def writeIntermediateResponse(self, code, headers=None):
pass
def writeHeaders(self, code, headers):
self.responseHeaders = headers
self.code = code
def write(self, data):
self.data += data
def finish(self, failed=False):
result = self.code, self.responseHeaders, self.data, failed
self.finished = True
self.deferredFinish.callback(result)
def abortConnection(self):
self.finish(failed=True)
def registerProducer(self, producer, streaming):
pass
def unregisterProducer(self):
pass
def getHostInfo(self):
return self.hostInfo
def getRemoteHost(self):
return self.remoteHost
class BaseTestResource(resource.Resource):
responseCode = 200
responseText = 'This is a fake resource.'
responseHeaders = {}
addSlash = False
def __init__(self, children=[]):
"""
@type children: C{list} of C{tuple}
@param children: a list of ('path', resource) tuples
"""
for i in children:
self.putChild(i[0], i[1])
def render(self, req):
return http.Response(self.responseCode, headers=self.responseHeaders,
stream=self.responseStream())
def responseStream(self):
return stream.MemoryStream(self.responseText)
_unset = object()
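# Sentinel used by BaseCase.getResponseFor to distinguish "length not supplied" from an explicit None.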
class BaseCase(unittest.TestCase):
"""
Base class for test cases that involve testing the result
of arbitrary HTTP(S) queries.
"""
method = 'GET'
version = (1, 1)
wait_timeout = 5.0
def chanrequest(self, root, uri, length, headers, method, version, prepath, content):
site = server.Site(root)
return TestChanRequest(site, method, prepath, uri, length, headers, version, content)
def getResponseFor(self, root, uri, headers={},
method=None, version=None, prepath='', content=None, length=_unset):
if not isinstance(headers, http_headers.Headers):
headers = http_headers.Headers(headers)
if length is _unset:
if content is not None:
length = len(content)
else:
length = 0
if method is None:
method = self.method
if version is None:
version = self.version
cr = self.chanrequest(root, uri, length, headers, method, version, prepath, content)
cr.request.process()
return cr.deferredFinish
def assertResponse(self, request_data, expected_response, failure=False):
"""
@type request_data: C{tuple}
@type expected_response: C{tuple}
@param request_data: A tuple of arguments to pass to L{getResponseFor}:
(root, uri, headers, method, version, prepath).
Root resource and requested URI are required,
and everything else is optional.
@param expected_response: A 3-tuple of the expected response:
(responseCode, headers, htmlData)
"""
d = self.getResponseFor(*request_data)
d.addCallback(self._cbGotResponse, expected_response, failure)
return d
def _cbGotResponse(self, (code, headers, data, failed), expected_response, expectedfailure=False):
expected_code, expected_headers, expected_data = expected_response
self.assertEquals(code, expected_code)
if expected_data is not None:
self.assertEquals(data, expected_data)
for key, value in expected_headers.iteritems():
self.assertEquals(headers.getHeader(key), value)
self.assertEquals(failed, expectedfailure)
class SampleWebTest(BaseCase):
class SampleTestResource(BaseTestResource):
addSlash = True
def child_validChild(self, req):
f = BaseTestResource()
f.responseCode = 200
f.responseText = 'This is a valid child resource.'
return f
def child_missingChild(self, req):
f = BaseTestResource()
f.responseCode = 404
f.responseStream = lambda self: None
return f
def child_remoteAddr(self, req):
f = BaseTestResource()
f.responseCode = 200
f.responseText = 'Remote Addr: %r' % req.remoteAddr.host
return f
def setUp(self):
self.root = self.SampleTestResource()
def test_root(self):
return self.assertResponse(
(self.root, 'http://host/'),
(200, {}, 'This is a fake resource.'))
def test_validChild(self):
return self.assertResponse(
(self.root, 'http://host/validChild'),
(200, {}, 'This is a valid child resource.'))
def test_invalidChild(self):
return self.assertResponse(
(self.root, 'http://host/invalidChild'),
(404, {}, None))
def test_remoteAddrExposure(self):
return self.assertResponse(
(self.root, 'http://host/remoteAddr'),
(200, {}, "Remote Addr: 'remotehost'"))
def test_leafresource(self):
class TestResource(resource.LeafResource):
def render(self, req):
return http.Response(stream="prepath:%s postpath:%s" % (
req.prepath,
req.postpath))
return self.assertResponse(
(TestResource(), 'http://host/consumed/path/segments'),
(200, {}, "prepath:[] postpath:['consumed', 'path', 'segments']"))
def test_redirectResource(self):
redirectResource = resource.RedirectResource(scheme='https',
host='localhost',
port=443,
path='/foo',
querystring='bar=baz')
return self.assertResponse(
(redirectResource, 'http://localhost/'),
(301, {'location': 'https://localhost/foo?bar=baz'}, None))
class URLParsingTest(BaseCase):
class TestResource(resource.LeafResource):
def render(self, req):
return http.Response(stream="Host:%s, Path:%s"%(req.host, req.path))
def setUp(self):
self.root = self.TestResource()
def test_normal(self):
return self.assertResponse(
(self.root, '/path', {'Host':'host'}),
(200, {}, 'Host:host, Path:/path'))
def test_fullurl(self):
return self.assertResponse(
(self.root, 'http://host/path'),
(200, {}, 'Host:host, Path:/path'))
def test_strangepath(self):
# Ensure that the double slashes don't confuse it
return self.assertResponse(
(self.root, '//path', {'Host':'host'}),
(200, {}, 'Host:host, Path://path'))
def test_strangepathfull(self):
return self.assertResponse(
(self.root, 'http://host//path'),
(200, {}, 'Host:host, Path://path'))
class TestDeferredRendering(BaseCase):
class ResourceWithDeferreds(BaseTestResource):
addSlash=True
responseText = 'I should be wrapped in a Deferred.'
def render(self, req):
d = defer.Deferred()
reactor.callLater(
0, d.callback, BaseTestResource.render(self, req))
return d
def child_deferred(self, req):
d = defer.Deferred()
reactor.callLater(0, d.callback, BaseTestResource())
return d
def test_deferredRootResource(self):
return self.assertResponse(
(self.ResourceWithDeferreds(), 'http://host/'),
(200, {}, 'I should be wrapped in a Deferred.'))
def test_deferredChild(self):
return self.assertResponse(
(self.ResourceWithDeferreds(), 'http://host/deferred'),
(200, {}, 'This is a fake resource.'))
class RedirectResourceTest(BaseCase):
def html(url):
return "<html><head><title>Moved Permanently</title></head><body><h1>Moved Permanently</h1><p>Document moved to %s.</p></body></html>" % (url,)
html = staticmethod(html)
def test_noRedirect(self):
# This is useless, since it's a loop, but hey
ds = []
for url in ("http://host/", "http://host/foo"):
ds.append(self.assertResponse(
(resource.RedirectResource(), url),
(301, {"location": url}, self.html(url))
))
return defer.DeferredList(ds, fireOnOneErrback=True)
def test_hostRedirect(self):
ds = []
for url1, url2 in (
("http://host/", "http://other/"),
("http://host/foo", "http://other/foo"),
):
ds.append(self.assertResponse(
(resource.RedirectResource(host="other"), url1),
(301, {"location": url2}, self.html(url2))
))
return defer.DeferredList(ds, fireOnOneErrback=True)
def test_pathRedirect(self):
root = BaseTestResource()
redirect = resource.RedirectResource(path="/other")
root.putChild("r", redirect)
ds = []
for url1, url2 in (
("http://host/r", "http://host/other"),
("http://host/r/foo", "http://host/other"),
):
ds.append(self.assertResponse(
(resource.RedirectResource(path="/other"), url1),
(301, {"location": url2}, self.html(url2))
))
return defer.DeferredList(ds, fireOnOneErrback=True)
class EmptyResource(resource.Resource):
def __init__(self, test):
self.test = test
def render(self, request):
self.test.assertEquals(request.urlForResource(self), self.expectedURI)
return 201
class RememberURIs(BaseCase):
"""
Tests for URI memory and lookup mechanism in server.Request.
"""
def test_requestedResource(self):
"""
Test urlForResource() on deeply nested resource looked up via
request processing.
"""
root = EmptyResource(self)
root.expectedURI = "/"
foo = EmptyResource(self)
foo.expectedURI = "/foo"
root.putChild("foo", foo)
bar = EmptyResource(self)
bar.expectedURI = foo.expectedURI + "/bar"
foo.putChild("bar", bar)
baz = EmptyResource(self)
baz.expectedURI = bar.expectedURI + "/baz"
bar.putChild("baz", baz)
ds = []
for uri in (foo.expectedURI, bar.expectedURI, baz.expectedURI):
ds.append(self.assertResponse(
(root, uri, {'Host':'host'}),
(201, {}, None),
))
return defer.DeferredList(ds, fireOnOneErrback=True)
def test_urlEncoding(self):
"""
Test to make sure that URL encoding is working.
"""
root = EmptyResource(self)
root.expectedURI = "/"
child = EmptyResource(self)
child.expectedURI = "/foo%20bar"
root.putChild("foo bar", child)
return self.assertResponse(
(root, child.expectedURI, {'Host':'host'}),
(201, {}, None)
)
def test_locateResource(self):
"""
Test urlForResource() on resource looked up via a locateResource() call.
"""
root = resource.Resource()
child = resource.Resource()
root.putChild("foo", child)
request = SimpleRequest(server.Site(root), "GET", "/")
def gotResource(resource):
self.assertEquals("/foo", request.urlForResource(resource))
d = defer.maybeDeferred(request.locateResource, "/foo")
d.addCallback(gotResource)
return d
def test_unknownResource(self):
"""
Test urlForResource() on unknown resource.
"""
root = resource.Resource()
child = resource.Resource()
request = SimpleRequest(server.Site(root), "GET", "/")
self.assertRaises(server.NoURLForResourceError, request.urlForResource, child)
def test_locateChildResource(self):
"""
Test urlForResource() on deeply nested resource looked up via
locateChildResource().
"""
root = EmptyResource(self)
root.expectedURI = "/"
foo = EmptyResource(self)
foo.expectedURI = "/foo"
root.putChild("foo", foo)
bar = EmptyResource(self)
bar.expectedURI = "/foo/bar"
foo.putChild("bar", bar)
baz = EmptyResource(self)
baz.expectedURI = "/foo/bar/b%20a%20z"
bar.putChild("b a z", baz)
request = SimpleRequest(server.Site(root), "GET", "/")
def gotResource(resource):
# Make sure locateChildResource() gave us the right answer
self.assertEquals(resource, bar)
return request.locateChildResource(resource, "b a z").addCallback(gotChildResource)
def gotChildResource(resource):
# Make sure locateChildResource() gave us the right answer
self.assertEquals(resource, baz)
self.assertEquals(resource.expectedURI, request.urlForResource(resource))
d = request.locateResource(bar.expectedURI)
d.addCallback(gotResource)
return d
def test_deferredLocateChild(self):
"""
Test deferred value from locateChild()
"""
class DeferredLocateChild(resource.Resource):
def locateChild(self, req, segments):
return defer.maybeDeferred(
super(DeferredLocateChild, self).locateChild,
req, segments
)
root = DeferredLocateChild()
child = resource.Resource()
root.putChild("foo", child)
request = SimpleRequest(server.Site(root), "GET", "/foo")
def gotResource(resource):
self.assertEquals("/foo", request.urlForResource(resource))
d = request.locateResource("/foo")
d.addCallback(gotResource)
return d
| [
"[email protected]"
] | |
d9d31ca5bdaefea63563a4acb77cd20e1e91a9a6 | 3b5f28ed1505c68f94ec1df496fe061d110294ce | /lixian_alias.py | fa09dd28ac832bbfe0362309b4481423e064db57 | [
"MIT"
] | permissive | yuanlizbyy/xunlei-lixian | 089d388fbf4023bfae217906268c19dde43528e1 | fe96ee19c1af8a268dc39818a5e8d33ff71e50ee | refs/heads/master | 2021-01-17T21:48:13.932068 | 2012-12-10T05:35:43 | 2012-12-10T05:35:43 | 7,854,959 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py |
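# Single-letter aliases for lixian command-line commands (e.g. 'd' -> 'download').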
def get_aliases():
return {'d': 'download', 'l': 'list', 'a': 'add', 'x': 'delete'}
def get_alias(a):
aliases = get_aliases()
if a in aliases:
return aliases[a]
def to_alias(a):
return get_alias(a) or a
| [
"[email protected]"
] | |
38394182dc771084f43996ec5c98459435358d6b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_terse.py | 6f23ba45e2ed3a286c756f26ee23e7427c152b34 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py |
#class header
class _TERSE():
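	# Auto-generated vocabulary entry: run() records this word (lowercased) under 'properties' for obj2 in jsondata.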
def __init__(self,):
self.name = "TERSE"
self.definitions = [u'using few words, sometimes in a way that seems rude or unfriendly: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'adjectives'
def run(self, obj1, obj2):
self.jsondata[obj2] = {}
self.jsondata[obj2]['properties'] = self.name.lower()
return self.jsondata
| [
"[email protected]"
] | |
cd521274e84ead7de41a90a16c88da24457be533 | 2cdd957f6cbf326ea902160011cb4f496e037bf9 | /python_oops/prk2.py | 1ae8c2a8d79c5f80d9275cabe183ae4f7ca24d16 | [] | no_license | Nitesh101/thundersoft | 81511c5672e8cb61055818b59fd216b26a784b1e | aa5cef1cfeb8a00d438a5280dff231bda494252d | refs/heads/master | 2020-09-24T20:56:53.655269 | 2019-12-04T10:43:23 | 2019-12-04T10:43:23 | 225,841,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | class parent():
def first(self):
print("first function")
class child(parent):
def second(self):
print("second functtion")
f = child()
f.first()
f.second()
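# child inherits first() from parent, so both calls above print their messages.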
| [
"[email protected]"
] | |
68ba2eba29b311b644c732d07fe15d436790ceed | da1dbb0e1c8c323bbf7ba0eac43b5815ce075282 | /python/ccxt/coinbase.py | 6f59d7b3d60bb8a98ea58eba5a10b1352aae46d0 | [
"MIT"
] | permissive | alexander-dev-hub/ccxt | d339662d527bdf0d99380c61ccce233c4475d1a1 | eba5dbe98cf106361c45cec9debda3d2722ea878 | refs/heads/master | 2022-07-10T05:03:35.809978 | 2019-09-02T19:10:10 | 2019-09-02T19:10:10 | 205,919,117 | 1 | 1 | MIT | 2022-06-22T15:56:21 | 2019-09-02T19:00:14 | JavaScript | UTF-8 | Python | false | false | 47,731 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import DDoSProtection
class coinbase (Exchange):
def describe(self):
return self.deep_extend(super(coinbase, self).describe(), {
'id': 'coinbase',
'name': 'Coinbase',
'countries': ['US'],
'rateLimit': 400, # 10k calls per hour
'version': 'v2',
'userAgent': self.userAgents['chrome'],
'headers': {
'CB-VERSION': '2018-05-30',
},
'has': {
'CORS': True,
'cancelOrder': False,
'createDepositAddress': True,
'createOrder': False,
'deposit': False,
'fetchBalance': True,
'fetchClosedOrders': False,
'fetchCurrencies': True,
'fetchDepositAddress': False,
'fetchMarkets': False,
'fetchMyTrades': False,
'fetchOHLCV': False,
'fetchOpenOrders': False,
'fetchOrder': False,
'fetchOrderBook': False,
'fetchL2OrderBook': False,
'fetchLedger': True,
'fetchOrders': False,
'fetchTicker': True,
'fetchTickers': False,
'fetchBidsAsks': False,
'fetchTrades': False,
'withdraw': False,
'fetchTransactions': False,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchMySells': True,
'fetchMyBuys': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/40811661-b6eceae2-653a-11e8-829e-10bfadb078cf.jpg',
'api': 'https://api.coinbase.com',
'www': 'https://www.coinbase.com',
'doc': 'https://developers.coinbase.com/api/v2',
'fees': 'https://support.coinbase.com/customer/portal/articles/2109597-buy-sell-bank-transfer-fees',
'referral': 'https://www.coinbase.com/join/58cbe25a355148797479dbd2',
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
},
'api': {
'public': {
'get': [
'currencies',
'time',
'exchange-rates',
'users/{user_id}',
'prices/{symbol}/buy',
'prices/{symbol}/sell',
'prices/{symbol}/spot',
],
},
'private': {
'get': [
'accounts',
'accounts/{account_id}',
'accounts/{account_id}/addresses',
'accounts/{account_id}/addresses/{address_id}',
'accounts/{account_id}/addresses/{address_id}/transactions',
'accounts/{account_id}/transactions',
'accounts/{account_id}/transactions/{transaction_id}',
'accounts/{account_id}/buys',
'accounts/{account_id}/buys/{buy_id}',
'accounts/{account_id}/sells',
'accounts/{account_id}/sells/{sell_id}',
'accounts/{account_id}/deposits',
'accounts/{account_id}/deposits/{deposit_id}',
'accounts/{account_id}/withdrawals',
'accounts/{account_id}/withdrawals/{withdrawal_id}',
'payment-methods',
'payment-methods/{payment_method_id}',
'user',
'user/auth',
],
'post': [
'accounts',
'accounts/{account_id}/primary',
'accounts/{account_id}/addresses',
'accounts/{account_id}/transactions',
'accounts/{account_id}/transactions/{transaction_id}/complete',
'accounts/{account_id}/transactions/{transaction_id}/resend',
'accounts/{account_id}/buys',
'accounts/{account_id}/buys/{buy_id}/commit',
'accounts/{account_id}/sells',
'accounts/{account_id}/sells/{sell_id}/commit',
                        'accounts/{account_id}/deposits',
                        'accounts/{account_id}/deposits/{deposit_id}/commit',
'accounts/{account_id}/withdrawals',
'accounts/{account_id}/withdrawals/{withdrawal_id}/commit',
],
'put': [
'accounts/{account_id}',
'user',
],
'delete': [
'accounts/{id}',
'accounts/{account_id}/transactions/{transaction_id}',
],
},
},
'exceptions': {
'two_factor_required': AuthenticationError, # 402 When sending money over 2fa limit
'param_required': ExchangeError, # 400 Missing parameter
'validation_error': ExchangeError, # 400 Unable to validate POST/PUT
'invalid_request': ExchangeError, # 400 Invalid request
'personal_details_required': AuthenticationError, # 400 User’s personal detail required to complete self request
'identity_verification_required': AuthenticationError, # 400 Identity verification is required to complete self request
'jumio_verification_required': AuthenticationError, # 400 Document verification is required to complete self request
'jumio_face_match_verification_required': AuthenticationError, # 400 Document verification including face match is required to complete self request
'unverified_email': AuthenticationError, # 400 User has not verified their email
'authentication_error': AuthenticationError, # 401 Invalid auth(generic)
'invalid_token': AuthenticationError, # 401 Invalid Oauth token
'revoked_token': AuthenticationError, # 401 Revoked Oauth token
'expired_token': AuthenticationError, # 401 Expired Oauth token
'invalid_scope': AuthenticationError, # 403 User hasn’t authenticated necessary scope
'not_found': ExchangeError, # 404 Resource not found
'rate_limit_exceeded': DDoSProtection, # 429 Rate limit exceeded
'internal_server_error': ExchangeError, # 500 Internal server error
},
'markets': {
'BTC/USD': {'id': 'btc-usd', 'symbol': 'BTC/USD', 'base': 'BTC', 'quote': 'USD'},
'LTC/USD': {'id': 'ltc-usd', 'symbol': 'LTC/USD', 'base': 'LTC', 'quote': 'USD'},
'ETH/USD': {'id': 'eth-usd', 'symbol': 'ETH/USD', 'base': 'ETH', 'quote': 'USD'},
'BCH/USD': {'id': 'bch-usd', 'symbol': 'BCH/USD', 'base': 'BCH', 'quote': 'USD'},
'BTC/EUR': {'id': 'btc-eur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR'},
'LTC/EUR': {'id': 'ltc-eur', 'symbol': 'LTC/EUR', 'base': 'LTC', 'quote': 'EUR'},
'ETH/EUR': {'id': 'eth-eur', 'symbol': 'ETH/EUR', 'base': 'ETH', 'quote': 'EUR'},
'BCH/EUR': {'id': 'bch-eur', 'symbol': 'BCH/EUR', 'base': 'BCH', 'quote': 'EUR'},
'BTC/GBP': {'id': 'btc-gbp', 'symbol': 'BTC/GBP', 'base': 'BTC', 'quote': 'GBP'},
'LTC/GBP': {'id': 'ltc-gbp', 'symbol': 'LTC/GBP', 'base': 'LTC', 'quote': 'GBP'},
'ETH/GBP': {'id': 'eth-gbp', 'symbol': 'ETH/GBP', 'base': 'ETH', 'quote': 'GBP'},
'BCH/GBP': {'id': 'bch-gbp', 'symbol': 'BCH/GBP', 'base': 'BCH', 'quote': 'GBP'},
},
'options': {
'accounts': [
'wallet',
'fiat',
# 'vault',
],
},
})
def fetch_time(self, params={}):
response = self.publicGetTime(params)
data = self.safe_value(response, 'data', {})
return self.parse8601(self.safe_string(data, 'iso'))
def fetch_accounts(self, params={}):
response = self.privateGetAccounts(params)
#
# {
# "id": "XLM",
# "name": "XLM Wallet",
# "primary": False,
# "type": "wallet",
# "currency": {
# "code": "XLM",
# "name": "Stellar Lumens",
# "color": "#000000",
# "sort_index": 127,
# "exponent": 7,
# "type": "crypto",
# "address_regex": "^G[A-Z2-7]{55}$",
# "asset_id": "13b83335-5ede-595b-821e-5bcdfa80560f",
# "destination_tag_name": "XLM Memo ID",
# "destination_tag_regex": "^[-~]{1,28}$"
# },
# "balance": {
# "amount": "0.0000000",
# "currency": "XLM"
# },
# "created_at": null,
# "updated_at": null,
# "resource": "account",
# "resource_path": "/v2/accounts/XLM",
# "allow_deposits": True,
# "allow_withdrawals": True
# }
#
data = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(data)):
account = data[i]
currency = self.safe_value(account, 'currency', {})
currencyId = self.safe_string(currency, 'code')
code = self.safe_currency_code(currencyId)
result.append({
'id': self.safe_string(account, 'id'),
'type': self.safe_string(account, 'type'),
'code': code,
'info': account,
})
return result
def create_deposit_address(self, code, params={}):
accountId = self.safe_string(params, 'account_id')
params = self.omit(params, 'account_id')
if accountId is None:
self.loadAccounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['code'] == code and account['type'] == 'wallet':
accountId = account['id']
break
if accountId is None:
raise ExchangeError(self.id + ' createDepositAddress could not find the account with matching currency code, specify an `account_id` extra param')
request = {
'account_id': accountId,
}
response = self.privatePostAccountsAccountIdAddresses(self.extend(request, params))
#
# {
# "data": {
# "id": "05b1ebbf-9438-5dd4-b297-2ddedc98d0e4",
# "address": "coinbasebase",
# "address_info": {
# "address": "coinbasebase",
# "destination_tag": "287594668"
# },
# "name": null,
# "created_at": "2019-07-01T14:39:29Z",
# "updated_at": "2019-07-01T14:39:29Z",
# "network": "eosio",
# "uri_scheme": "eosio",
# "resource": "address",
# "resource_path": "/v2/accounts/14cfc769-e852-52f3-b831-711c104d194c/addresses/05b1ebbf-9438-5dd4-b297-2ddedc98d0e4",
# "warnings": [
# {
# "title": "Only send EOS(EOS) to self address",
# "details": "Sending any other cryptocurrency will result in permanent loss.",
# "image_url": "https://dynamic-assets.coinbase.com/deaca3d47b10ed4a91a872e9618706eec34081127762d88f2476ac8e99ada4b48525a9565cf2206d18c04053f278f693434af4d4629ca084a9d01b7a286a7e26/asset_icons/1f8489bb280fb0a0fd643c1161312ba49655040e9aaaced5f9ad3eeaf868eadc.png"
# },
# {
# "title": "Both an address and EOS memo are required to receive EOS",
# "details": "If you send funds without an EOS memo or with an incorrect EOS memo, your funds cannot be credited to your account.",
# "image_url": "https://www.coinbase.com/assets/receive-warning-2f3269d83547a7748fb39d6e0c1c393aee26669bfea6b9f12718094a1abff155.png"
# }
# ],
# "warning_title": "Only send EOS(EOS) to self address",
# "warning_details": "Sending any other cryptocurrency will result in permanent loss.",
# "destination_tag": "287594668",
# "deposit_uri": "eosio:coinbasebase?dt=287594668",
# "callback_url": null
# }
# }
#
data = self.safe_value(response, 'data', {})
tag = self.safe_string(data, 'destination_tag')
address = self.safe_string(data, 'address')
return {
'currency': code,
'tag': tag,
'address': address,
'info': response,
}
def fetch_my_sells(self, symbol=None, since=None, limit=None, params={}):
# they don't have an endpoint for all historical trades
request = self.prepare_account_request(limit, params)
self.load_markets()
query = self.omit(params, ['account_id', 'accountId'])
sells = self.privateGetAccountsAccountIdSells(self.extend(request, query))
return self.parse_trades(sells['data'], None, since, limit)
def fetch_my_buys(self, symbol=None, since=None, limit=None, params={}):
# they don't have an endpoint for all historical trades
request = self.prepare_account_request(limit, params)
self.load_markets()
query = self.omit(params, ['account_id', 'accountId'])
buys = self.privateGetAccountsAccountIdBuys(self.extend(request, query))
return self.parse_trades(buys['data'], None, since, limit)
def fetch_transactions_with_method(self, method, code=None, since=None, limit=None, params={}):
request = self.prepare_account_request_with_currency_code(code, limit, params)
self.load_markets()
query = self.omit(params, ['account_id', 'accountId'])
response = getattr(self, method)(self.extend(request, query))
return self.parseTransactions(response['data'], None, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
# fiat only, for crypto transactions use fetchLedger
return self.fetch_transactions_with_method('privateGetAccountsAccountIdWithdrawals', code, since, limit, params)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
# fiat only, for crypto transactions use fetchLedger
return self.fetch_transactions_with_method('privateGetAccountsAccountIdDeposits', code, since, limit, params)
def parse_transaction_status(self, status):
statuses = {
'created': 'pending',
'completed': 'ok',
'canceled': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, market=None):
#
# fiat deposit
#
# {
# "id": "f34c19f3-b730-5e3d-9f72",
# "status": "completed",
# "payment_method": {
# "id": "a022b31d-f9c7-5043-98f2",
# "resource": "payment_method",
# "resource_path": "/v2/payment-methods/a022b31d-f9c7-5043-98f2"
# },
# "transaction": {
# "id": "04ed4113-3732-5b0c-af86-b1d2146977d0",
# "resource": "transaction",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/04ed4113-3732-5b0c-af86"
# },
# "user_reference": "2VTYTH",
# "created_at": "2017-02-09T07:01:18Z",
# "updated_at": "2017-02-09T07:01:26Z",
# "resource": "deposit",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/deposits/f34c19f3-b730-5e3d-9f72",
# "committed": True,
# "payout_at": "2017-02-12T07:01:17Z",
# "instant": False,
# "fee": {"amount": "0.00", "currency": "EUR"},
# "amount": {"amount": "114.02", "currency": "EUR"},
# "subtotal": {"amount": "114.02", "currency": "EUR"},
# "hold_until": null,
# "hold_days": 0,
# "hold_business_days": 0,
# "next_step": null
# }
#
# fiat_withdrawal
#
# {
# "id": "cfcc3b4a-eeb6-5e8c-8058",
# "status": "completed",
# "payment_method": {
# "id": "8b94cfa4-f7fd-5a12-a76a",
# "resource": "payment_method",
# "resource_path": "/v2/payment-methods/8b94cfa4-f7fd-5a12-a76a"
# },
# "transaction": {
# "id": "fcc2550b-5104-5f83-a444",
# "resource": "transaction",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/fcc2550b-5104-5f83-a444"
# },
# "user_reference": "MEUGK",
# "created_at": "2018-07-26T08:55:12Z",
# "updated_at": "2018-07-26T08:58:18Z",
# "resource": "withdrawal",
# "resource_path": "/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/withdrawals/cfcc3b4a-eeb6-5e8c-8058",
# "committed": True,
# "payout_at": "2018-07-31T08:55:12Z",
# "instant": False,
# "fee": {"amount": "0.15", "currency": "EUR"},
# "amount": {"amount": "13130.69", "currency": "EUR"},
# "subtotal": {"amount": "13130.84", "currency": "EUR"},
# "idem": "e549dee5-63ed-4e79-8a96",
# "next_step": null
# }
#
amountObject = self.safe_value(transaction, 'amount', {})
feeObject = self.safe_value(transaction, 'fee', {})
id = self.safe_string(transaction, 'id')
timestamp = self.parse8601(self.safe_value(transaction, 'created_at'))
updated = self.parse8601(self.safe_value(transaction, 'updated_at'))
type = self.safe_string(transaction, 'resource')
amount = self.safe_float(amountObject, 'amount')
currencyId = self.safe_string(amountObject, 'currency')
currency = self.safe_currency_code(currencyId)
feeCost = self.safe_float(feeObject, 'amount')
feeCurrencyId = self.safe_string(feeObject, 'currency')
feeCurrency = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
if status is None:
committed = self.safe_value(transaction, 'committed')
status = 'ok' if committed else 'pending'
return {
'info': transaction,
'id': id,
'txid': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': None,
'tag': None,
'type': type,
'amount': amount,
'currency': currency,
'status': status,
'updated': updated,
'fee': fee,
}
def parse_trade(self, trade, market=None):
#
# {
# "id": "67e0eaec-07d7-54c4-a72c-2e92826897df",
# "status": "completed",
# "payment_method": {
# "id": "83562370-3e5c-51db-87da-752af5ab9559",
# "resource": "payment_method",
# "resource_path": "/v2/payment-methods/83562370-3e5c-51db-87da-752af5ab9559"
# },
# "transaction": {
# "id": "441b9494-b3f0-5b98-b9b0-4d82c21c252a",
# "resource": "transaction",
# "resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/transactions/441b9494-b3f0-5b98-b9b0-4d82c21c252a"
# },
# "amount": {"amount": "1.00000000", "currency": "BTC"},
# "total": {"amount": "10.25", "currency": "USD"},
# "subtotal": {"amount": "10.10", "currency": "USD"},
# "created_at": "2015-01-31T20:49:02Z",
# "updated_at": "2015-02-11T16:54:02-08:00",
# "resource": "buy",
# "resource_path": "/v2/accounts/2bbf394c-193b-5b2a-9155-3b4732659ede/buys/67e0eaec-07d7-54c4-a72c-2e92826897df",
# "committed": True,
# "instant": False,
# "fee": {"amount": "0.15", "currency": "USD"},
# "payout_at": "2015-02-18T16:54:00-08:00"
# }
#
symbol = None
totalObject = self.safe_value(trade, 'total', {})
amountObject = self.safe_value(trade, 'amount', {})
subtotalObject = self.safe_value(trade, 'subtotal', {})
feeObject = self.safe_value(trade, 'fee', {})
id = self.safe_string(trade, 'id')
timestamp = self.parse8601(self.safe_value(trade, 'created_at'))
if market is None:
baseId = self.safe_string(totalObject, 'currency')
quoteId = self.safe_string(amountObject, 'currency')
if (baseId is not None) and(quoteId is not None):
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
orderId = None
side = self.safe_string(trade, 'resource')
type = None
cost = self.safe_float(subtotalObject, 'amount')
amount = self.safe_float(amountObject, 'amount')
price = None
if cost is not None:
if amount is not None:
price = cost / amount
feeCost = self.safe_float(feeObject, 'amount')
feeCurrencyId = self.safe_string(feeObject, 'currency')
feeCurrency = self.safe_currency_code(feeCurrencyId)
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return {
'info': trade,
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_currencies(self, params={}):
response = self.publicGetCurrencies(params)
currencies = response['data']
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
id = self.safe_string(currency, 'id')
name = self.safe_string(currency, 'name')
code = self.safe_currency_code(id)
minimum = self.safe_float(currency, 'min_size')
result[code] = {
'id': id,
'code': code,
'info': currency, # the original payload
'name': name,
'active': True,
'fee': None,
'precision': None,
'limits': {
'amount': {
'min': minimum,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': None,
},
},
}
return result
def fetch_ticker(self, symbol, params={}):
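        # Coinbase's retail API has no single consolidated ticker endpoint, so the ticker is
        # stitched together from the /prices/{symbol}/buy, /sell and /spot endpoints below.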
self.load_markets()
timestamp = self.seconds()
market = self.market(symbol)
request = self.extend({
'symbol': market['id'],
}, params)
buy = self.publicGetPricesSymbolBuy(request)
sell = self.publicGetPricesSymbolSell(request)
spot = self.publicGetPricesSymbolSpot(request)
ask = self.safe_float(buy['data'], 'amount')
bid = self.safe_float(sell['data'], 'amount')
last = self.safe_float(spot['data'], 'amount')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'bid': bid,
'ask': ask,
'last': last,
'high': None,
'low': None,
'bidVolume': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': None,
'quoteVolume': None,
'info': {
'buy': buy,
'sell': sell,
'spot': spot,
},
}
def fetch_balance(self, params={}):
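        # Only account types listed in self.options['accounts'] ('wallet' and 'fiat' by default)
        # are aggregated into the balance; vault accounts are skipped.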
self.load_markets()
response = self.privateGetAccounts(params)
balances = self.safe_value(response, 'data')
accounts = self.safe_value(params, 'type', self.options['accounts'])
result = {'info': response}
for b in range(0, len(balances)):
balance = balances[b]
if self.in_array(balance['type'], accounts):
currencyId = self.safe_string(balance['balance'], 'currency')
code = self.safe_currency_code(currencyId)
total = self.safe_float(balance['balance'], 'amount')
free = total
used = None
if code in result:
result[code]['free'] = self.sum(result[code]['free'], total)
result[code]['total'] = self.sum(result[code]['total'], total)
else:
account = {
'free': free,
'used': used,
'total': total,
}
result[code] = account
return self.parse_balance(result)
def fetch_ledger(self, code=None, since=None, limit=None, params={}):
self.load_markets()
request = self.prepare_account_request_with_currency_code(code, limit, params)
query = self.omit(params, ['account_id', 'accountId'])
# for pagination use parameter 'starting_after'
# the value for the next page can be obtained from the result of the previous call in the 'pagination' field
# eg: instance.last_json_response.pagination.next_starting_after
response = self.privateGetAccountsAccountIdTransactions(self.extend(request, query))
return self.parse_ledger(response['data'], None, since, limit)
def parse_ledger_entry_status(self, status):
types = {
'completed': 'ok',
}
return self.safe_string(types, status, status)
def parse_ledger_entry_type(self, type):
types = {
'buy': 'trade',
'sell': 'trade',
'fiat_deposit': 'transaction',
'fiat_withdrawal': 'transaction',
'exchange_deposit': 'transaction', # fiat withdrawal(from coinbase to coinbasepro)
'exchange_withdrawal': 'transaction', # fiat deposit(to coinbase from coinbasepro)
'send': 'transaction', # crypto deposit OR withdrawal
'pro_deposit': 'transaction', # crypto withdrawal(from coinbase to coinbasepro)
'pro_withdrawal': 'transaction', # crypto deposit(to coinbase from coinbasepro)
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
# crypto deposit transaction
#
# {
# id: '34e4816b-4c8c-5323-a01c-35a9fa26e490',
# type: 'send',
# status: 'completed',
# amount: {amount: '28.31976528', currency: 'BCH'},
# native_amount: {amount: '2799.65', currency: 'GBP'},
# description: null,
# created_at: '2019-02-28T12:35:20Z',
# updated_at: '2019-02-28T12:43:24Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/c01d7364-edd7-5f3a-bd1d-de53d4cbb25e/transactions/34e4816b-4c8c-5323-a01c-35a9fa26e490',
# instant_exchange: False,
# network: {
# status: 'confirmed',
# hash: '56222d865dae83774fccb2efbd9829cf08c75c94ce135bfe4276f3fb46d49701',
# transaction_url: 'https://bch.btc.com/56222d865dae83774fccb2efbd9829cf08c75c94ce135bfe4276f3fb46d49701'
# },
# from: {resource: 'bitcoin_cash_network', currency: 'BCH'},
# details: {title: 'Received Bitcoin Cash', subtitle: 'From Bitcoin Cash address'}
# }
#
# crypto withdrawal transaction
#
# {
# id: '459aad99-2c41-5698-ac71-b6b81a05196c',
# type: 'send',
# status: 'completed',
# amount: {amount: '-0.36775642', currency: 'BTC'},
# native_amount: {amount: '-1111.65', currency: 'GBP'},
# description: null,
# created_at: '2019-03-20T08:37:07Z',
# updated_at: '2019-03-20T08:49:33Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/c6afbd34-4bd0-501e-8616-4862c193cd84/transactions/459aad99-2c41-5698-ac71-b6b81a05196c',
# instant_exchange: False,
# network: {
# status: 'confirmed',
# hash: '2732bbcf35c69217c47b36dce64933d103895277fe25738ffb9284092701e05b',
# transaction_url: 'https://blockchain.info/tx/2732bbcf35c69217c47b36dce64933d103895277fe25738ffb9284092701e05b',
# transaction_fee: {amount: '0.00000000', currency: 'BTC'},
# transaction_amount: {amount: '0.36775642', currency: 'BTC'},
# confirmations: 15682
# },
# to: {
# resource: 'bitcoin_address',
# address: '1AHnhqbvbYx3rnZx8uC7NbFZaTe4tafFHX',
# currency: 'BTC',
# address_info: {address: '1AHnhqbvbYx3rnZx8uC7NbFZaTe4tafFHX'}
# },
# idem: 'da0a2f14-a2af-4c5a-a37e-d4484caf582bsend',
# application: {
# id: '5756ab6e-836b-553b-8950-5e389451225d',
# resource: 'application',
# resource_path: '/v2/applications/5756ab6e-836b-553b-8950-5e389451225d'
# },
# details: {title: 'Sent Bitcoin', subtitle: 'To Bitcoin address'}
# }
#
# withdrawal transaction from coinbase to coinbasepro
#
# {
# id: '5b1b9fb8-5007-5393-b923-02903b973fdc',
# type: 'pro_deposit',
# status: 'completed',
# amount: {amount: '-0.00001111', currency: 'BCH'},
# native_amount: {amount: '0.00', currency: 'GBP'},
# description: null,
# created_at: '2019-02-28T13:31:58Z',
# updated_at: '2019-02-28T13:31:58Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/c01d7364-edd7-5f3a-bd1d-de53d4cbb25e/transactions/5b1b9fb8-5007-5393-b923-02903b973fdc',
# instant_exchange: False,
# application: {
# id: '5756ab6e-836b-553b-8950-5e389451225d',
# resource: 'application',
# resource_path: '/v2/applications/5756ab6e-836b-553b-8950-5e389451225d'
# },
# details: {title: 'Transferred Bitcoin Cash', subtitle: 'To Coinbase Pro'}
# }
#
# withdrawal transaction from coinbase to gdax
#
# {
# id: 'badb7313-a9d3-5c07-abd0-00f8b44199b1',
# type: 'exchange_deposit',
# status: 'completed',
# amount: {amount: '-0.43704149', currency: 'BCH'},
# native_amount: {amount: '-51.90', currency: 'GBP'},
# description: null,
# created_at: '2019-03-19T10:30:40Z',
# updated_at: '2019-03-19T10:30:40Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/c01d7364-edd7-5f3a-bd1d-de53d4cbb25e/transactions/badb7313-a9d3-5c07-abd0-00f8b44199b1',
# instant_exchange: False,
# details: {title: 'Transferred Bitcoin Cash', subtitle: 'To GDAX'}
# }
#
# deposit transaction from gdax to coinbase
#
# {
# id: '9c4b642c-8688-58bf-8962-13cef64097de',
# type: 'exchange_withdrawal',
# status: 'completed',
# amount: {amount: '0.57729420', currency: 'BTC'},
# native_amount: {amount: '4418.72', currency: 'GBP'},
# description: null,
# created_at: '2018-02-17T11:33:33Z',
# updated_at: '2018-02-17T11:33:33Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/c6afbd34-4bd0-501e-8616-4862c193cd84/transactions/9c4b642c-8688-58bf-8962-13cef64097de',
# instant_exchange: False,
# details: {title: 'Transferred Bitcoin', subtitle: 'From GDAX'}
# }
#
# deposit transaction from coinbasepro to coinbase
#
# {
# id: '8d6dd0b9-3416-568a-889d-8f112fae9e81',
# type: 'pro_withdrawal',
# status: 'completed',
# amount: {amount: '0.40555386', currency: 'BTC'},
# native_amount: {amount: '1140.27', currency: 'GBP'},
# description: null,
# created_at: '2019-03-04T19:41:58Z',
# updated_at: '2019-03-04T19:41:58Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/c6afbd34-4bd0-501e-8616-4862c193cd84/transactions/8d6dd0b9-3416-568a-889d-8f112fae9e81',
# instant_exchange: False,
# application: {
# id: '5756ab6e-836b-553b-8950-5e389451225d',
# resource: 'application',
# resource_path: '/v2/applications/5756ab6e-836b-553b-8950-5e389451225d'
# },
# details: {title: 'Transferred Bitcoin', subtitle: 'From Coinbase Pro'}
# }
#
# sell trade
#
# {
# id: 'a9409207-df64-585b-97ab-a50780d2149e',
# type: 'sell',
# status: 'completed',
# amount: {amount: '-9.09922880', currency: 'BTC'},
# native_amount: {amount: '-7285.73', currency: 'GBP'},
# description: null,
# created_at: '2017-03-27T15:38:34Z',
# updated_at: '2017-03-27T15:38:34Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/c6afbd34-4bd0-501e-8616-4862c193cd84/transactions/a9409207-df64-585b-97ab-a50780d2149e',
# instant_exchange: False,
# sell: {
# id: 'e3550b4d-8ae6-5de3-95fe-1fb01ba83051',
# resource: 'sell',
# resource_path: '/v2/accounts/c6afbd34-4bd0-501e-8616-4862c193cd84/sells/e3550b4d-8ae6-5de3-95fe-1fb01ba83051'
# },
# details: {
# title: 'Sold Bitcoin',
# subtitle: 'Using EUR Wallet',
# payment_method_name: 'EUR Wallet'
# }
# }
#
# buy trade
#
# {
# id: '63eeed67-9396-5912-86e9-73c4f10fe147',
# type: 'buy',
# status: 'completed',
# amount: {amount: '2.39605772', currency: 'ETH'},
# native_amount: {amount: '98.31', currency: 'GBP'},
# description: null,
# created_at: '2017-03-27T09:07:56Z',
# updated_at: '2017-03-27T09:07:57Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/8902f85d-4a69-5d74-82fe-8e390201bda7/transactions/63eeed67-9396-5912-86e9-73c4f10fe147',
# instant_exchange: False,
# buy: {
# id: '20b25b36-76c6-5353-aa57-b06a29a39d82',
# resource: 'buy',
# resource_path: '/v2/accounts/8902f85d-4a69-5d74-82fe-8e390201bda7/buys/20b25b36-76c6-5353-aa57-b06a29a39d82'
# },
# details: {
# title: 'Bought Ethereum',
# subtitle: 'Using EUR Wallet',
# payment_method_name: 'EUR Wallet'
# }
# }
#
# fiat deposit transaction
#
# {
# id: '04ed4113-3732-5b0c-af86-b1d2146977d0',
# type: 'fiat_deposit',
# status: 'completed',
# amount: {amount: '114.02', currency: 'EUR'},
# native_amount: {amount: '97.23', currency: 'GBP'},
# description: null,
# created_at: '2017-02-09T07:01:21Z',
# updated_at: '2017-02-09T07:01:22Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/04ed4113-3732-5b0c-af86-b1d2146977d0',
# instant_exchange: False,
# fiat_deposit: {
# id: 'f34c19f3-b730-5e3d-9f72-96520448677a',
# resource: 'fiat_deposit',
# resource_path: '/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/deposits/f34c19f3-b730-5e3d-9f72-96520448677a'
# },
# details: {
# title: 'Deposited funds',
# subtitle: 'From SEPA Transfer(GB47 BARC 20..., reference CBADVI)',
# payment_method_name: 'SEPA Transfer(GB47 BARC 20..., reference CBADVI)'
# }
# }
#
# fiat withdrawal transaction
#
# {
# id: '957d98e2-f80e-5e2f-a28e-02945aa93079',
# type: 'fiat_withdrawal',
# status: 'completed',
# amount: {amount: '-11000.00', currency: 'EUR'},
# native_amount: {amount: '-9698.22', currency: 'GBP'},
# description: null,
# created_at: '2017-12-06T13:19:19Z',
# updated_at: '2017-12-06T13:19:19Z',
# resource: 'transaction',
# resource_path: '/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/transactions/957d98e2-f80e-5e2f-a28e-02945aa93079',
# instant_exchange: False,
# fiat_withdrawal: {
# id: 'f4bf1fd9-ab3b-5de7-906d-ed3e23f7a4e7',
# resource: 'fiat_withdrawal',
# resource_path: '/v2/accounts/91cd2d36-3a91-55b6-a5d4-0124cf105483/withdrawals/f4bf1fd9-ab3b-5de7-906d-ed3e23f7a4e7'
# },
# details: {
# title: 'Withdrew funds',
# subtitle: 'To HSBC BANK PLC(GB74 MIDL...)',
# payment_method_name: 'HSBC BANK PLC(GB74 MIDL...)'
# }
# }
#
amountInfo = self.safe_value(item, 'amount', {})
amount = self.safe_float(amountInfo, 'amount')
direction = None
if amount < 0:
direction = 'out'
amount = -amount
else:
direction = 'in'
currencyId = self.safe_string(amountInfo, 'currency')
code = self.safe_currency_code(currencyId, currency)
#
# the address and txid do not belong to the unified ledger structure
#
# address = None
# if item['to']:
# address = self.safe_string(item['to'], 'address')
# }
# txid = None
#
fee = None
networkInfo = self.safe_value(item, 'network', {})
# txid = network['hash'] # txid does not belong to the unified ledger structure
feeInfo = self.safe_value(networkInfo, 'transaction_fee')
if feeInfo is not None:
feeCurrencyId = self.safe_string(feeInfo, 'currency')
feeCurrencyCode = self.safe_currency_code(feeCurrencyId, currency)
feeAmount = self.safe_float(feeInfo, 'amount')
fee = {
'cost': feeAmount,
'currency': feeCurrencyCode,
}
timestamp = self.parse8601(self.safe_value(item, 'created_at'))
id = self.safe_string(item, 'id')
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
status = self.parse_ledger_entry_status(self.safe_string(item, 'status'))
path = self.safe_string(item, 'resource_path')
accountId = None
if path is not None:
parts = path.split('/')
numParts = len(parts)
if numParts > 3:
accountId = parts[3]
return {
'info': item,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'direction': direction,
'account': accountId,
'referenceId': None,
'referenceAccount': None,
'type': type,
'currency': code,
'amount': amount,
'before': None,
'after': None,
'status': status,
'fee': fee,
}
def find_account_id(self, code):
self.load_markets()
self.loadAccounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['code'] == code:
return account['id']
return None
def prepare_account_request(self, limit=None, params={}):
accountId = self.safe_string_2(params, 'account_id', 'accountId')
if accountId is None:
raise ArgumentsRequired(self.id + ' method requires an account_id(or accountId) parameter')
request = {
'account_id': accountId,
}
if limit is not None:
request['limit'] = limit
return request
def prepare_account_request_with_currency_code(self, code=None, limit=None, params={}):
accountId = self.safe_string_2(params, 'account_id', 'accountId')
if accountId is None:
if code is None:
raise ArgumentsRequired(self.id + ' method requires an account_id(or accountId) parameter OR a currency code argument')
accountId = self.find_account_id(code)
if accountId is None:
raise ExchangeError(self.id + ' could not find account id for ' + code)
request = {
'account_id': accountId,
}
if limit is not None:
request['limit'] = limit
return request
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
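        # Private calls are signed with the Coinbase v2 scheme: an HMAC-SHA256 of
        # (timestamp + METHOD + path + body) keyed with the API secret, sent in the
        # CB-ACCESS-KEY / CB-ACCESS-SIGN / CB-ACCESS-TIMESTAMP headers built below.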
fullPath = '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if method == 'GET':
if query:
fullPath += '?' + self.urlencode(query)
url = self.urls['api'] + fullPath
if api == 'private':
self.check_required_credentials()
nonce = str(self.nonce())
payload = ''
if method != 'GET':
if query:
body = self.json(query)
payload = body
auth = nonce + method + fullPath + payload
signature = self.hmac(self.encode(auth), self.encode(self.secret))
headers = {
'CB-ACCESS-KEY': self.apiKey,
'CB-ACCESS-SIGN': signature,
'CB-ACCESS-TIMESTAMP': nonce,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
feedback = self.id + ' ' + body
#
# {"error": "invalid_request", "error_description": "The request is missing a required parameter, includes an unsupported parameter value, or is otherwise malformed."}
#
# or
#
# {
# "errors": [
# {
# "id": "not_found",
# "message": "Not found"
# }
# ]
# }
#
exceptions = self.exceptions
errorCode = self.safe_string(response, 'error')
if errorCode is not None:
if errorCode in exceptions:
raise exceptions[errorCode](feedback)
else:
raise ExchangeError(feedback)
errors = self.safe_value(response, 'errors')
if errors is not None:
if isinstance(errors, list):
numErrors = len(errors)
if numErrors > 0:
errorCode = self.safe_string(errors[0], 'id')
if errorCode is not None:
if errorCode in exceptions:
raise exceptions[errorCode](feedback)
else:
raise ExchangeError(feedback)
data = self.safe_value(response, 'data')
if data is None:
raise ExchangeError(self.id + ' failed due to a malformed response ' + self.json(response))
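# Illustrative usage sketch (assumes an installed ccxt build and valid API credentials):
#   import ccxt
#   exchange = ccxt.coinbase({'apiKey': '<key>', 'secret': '<secret>'})
#   balance = exchange.fetch_balance()
#   ticker = exchange.fetch_ticker('BTC/USD')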
| [
"[email protected]"
] | |
008181a3ff8d888d84ca86e89c8bb777fa600932 | afc677459e46635ceffccf60d1daf50e62694557 | /ACME/math/randrotm.py | 4c23dd04c57218e5324cda86a0211924591e4879 | [
"MIT"
] | permissive | mauriziokovacic/ACME | 056b06da4bf66d89087fcfcbe0fd0a2e255d09f3 | 2615b66dd4addfd5c03d9d91a24c7da414294308 | refs/heads/master | 2020-05-23T23:40:06.667416 | 2020-01-10T14:42:01 | 2020-01-10T14:42:01 | 186,997,977 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 514 | py | from .constant import *
from .eul2rotm import *
def randrotm(n=1, device='cuda:0'):
"""
    Returns n random rotation matrices
Parameters
----------
n : int (optional)
the number of rotation matrices to generate (default is 1)
device : str (optional)
the device to store the tensor to (default is 'cuda:0')
Returns
-------
Tensor
the (n,3,3,) rotation matrices tensor
"""
return eul2rotm(torch.rand(n, 3, dtype=torch.float, device=device)*PI2)
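# Minimal usage sketch (assumes PyTorch and the rest of the ACME.math package are importable):
#   R = randrotm(5, device='cpu')   # R has shape (5, 3, 3); each slice is a 3x3 rotation matrix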
| [
"[email protected]"
] | |
873cbdcd0a29dfe7fca8f80e22b8dad16471b2fb | 11f7add72635ad985b3e98fd77e9426e8c74ab08 | /google-api-python-client-1.0beta7/samples/adsense/sample_utils.py | e4463e584deb6cc3a870ce678814f5b070beb7f1 | [] | no_license | harshdarji/python | afa6b11338504567ece8bb1e78e841d13716ff14 | 8bad854304f423264b7b0724b87c7cd7de748cd6 | refs/heads/master | 2020-12-31T01:48:04.439466 | 2012-09-13T09:22:58 | 2012-09-13T09:22:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,811 | py | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Auxiliary file for AdSense Management API code samples.
Handles various tasks to do with logging, authentication and initialization.
"""
__author__ = '[email protected] (Sergio Gomes)'
import logging
import os
import sys
from apiclient.discovery import build
import gflags
import httplib2
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run
FLAGS = gflags.FLAGS
# CLIENT_SECRETS, name of a file containing the OAuth 2.0 information for this
# application, including client_id and client_secret, which are found
# on the API Access tab on the Google APIs
# Console <http://code.google.com/apis/console>
CLIENT_SECRETS = 'client_secrets.json'
# Helpful message to display in the browser if the CLIENT_SECRETS file
# is missing.
MISSING_CLIENT_SECRETS_MESSAGE = """
WARNING: Please configure OAuth 2.0
To make this sample run you will need to populate the client_secrets.json file
found at:
%s
with information from the APIs Console <https://code.google.com/apis/console>.
""" % os.path.join(os.path.dirname(__file__), CLIENT_SECRETS)
# Set up a Flow object to be used if we need to authenticate.
FLOW = flow_from_clientsecrets(CLIENT_SECRETS,
scope='https://www.googleapis.com/auth/adsense.readonly',
message=MISSING_CLIENT_SECRETS_MESSAGE)
# The gflags module makes defining command-line options easy for applications.
# Run this program with the '--help' argument to see all the flags that it
# understands.
gflags.DEFINE_enum('logging_level', 'ERROR',
['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
'Set the level of logging detail.')
def process_flags(argv):
"""Uses the command-line flags to set the logging level."""
# Let the gflags module process the command-line arguments.
try:
argv = FLAGS(argv)
except gflags.FlagsError, e:
    print '%s\nUsage: %s ARGS\n%s' % (e, argv[0], FLAGS)
sys.exit(1)
# Set the logging according to the command-line flag.
logging.getLogger().setLevel(getattr(logging, FLAGS.logging_level))
def prepare_credentials():
"""Handles auth. Reuses credentialss if available or runs the auth flow."""
# If the credentials don't exist or are invalid run through the native client
# flow. The Storage object will ensure that if successful the good
# Credentials will get written back to a file.
storage = Storage('adsense.dat')
credentials = storage.get()
if credentials is None or credentials.invalid:
credentials = run(FLOW, storage)
return credentials
def retrieve_service(http):
"""Retrieves an AdSense Management API service via the discovery service."""
# Construct a service object via the discovery service.
service = build("adsense", "v1", http=http)
return service
def initialize_service():
"""Builds instance of service from discovery data and does auth."""
# Create an httplib2.Http object to handle our HTTP requests.
http = httplib2.Http()
# Prepare credentials, and authorize HTTP object with them.
credentials = prepare_credentials()
http = credentials.authorize(http)
# Retrieve service.
return retrieve_service(http)
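# Typical call sequence from one of the AdSense sample scripts (illustrative sketch; the
# sample module is assumed to import this file as sample_utils):
#   sample_utils.process_flags(sys.argv)
#   service = sample_utils.initialize_service()
#   # ... then issue API calls, e.g. service.adclients().list().execute()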
| [
"[email protected]"
] | |
eed9a66ad4a6c595e1640777cc94f4b3abebc576 | 25040bd4e02ff9e4fbafffee0c6df158a62f0d31 | /www/htdocs/wt/lapnw/data/item_20_3.tmpl.py | 28c4804d0e6461b17d8ba3b9ace7498eb496d3a5 | [] | no_license | erochest/atlas | 107a14e715a058d7add1b45922b0f8d03bd2afef | ea66b80c449e5b1141e5eddc4a5995d27c2a94ee | refs/heads/master | 2021-05-16T00:45:47.585627 | 2017-10-09T10:12:03 | 2017-10-09T10:12:03 | 104,338,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py |
from lap.web.templates import GlobalTemplate, SubtemplateCode
class main(GlobalTemplate):
title = 'Page.Item: 20.3'
project = 'lapnw'
class page(SubtemplateCode):
pass
| [
"eric@eric-desktop"
] | eric@eric-desktop |
eb90db1bbb2724acef1832c6ce3bcc52e6a902ac | a838d4bed14d5df5314000b41f8318c4ebe0974e | /sdk/iothub/azure-mgmt-iothub/azure/mgmt/iothub/v2017_07_01/aio/operations/_iot_hub_resource_operations.py | 25662122bb60abebfac1843541c4c76061cd131e | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | scbedd/azure-sdk-for-python | ee7cbd6a8725ddd4a6edfde5f40a2a589808daea | cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a | refs/heads/master | 2023-09-01T08:38:56.188954 | 2021-06-17T22:52:28 | 2021-06-17T22:52:28 | 159,568,218 | 2 | 0 | MIT | 2019-08-11T21:16:01 | 2018-11-28T21:34:49 | Python | UTF-8 | Python | false | false | 77,349 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IotHubResourceOperations:
"""IotHubResourceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothub.v2017_07_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> "_models.IotHubDescription":
"""Get the non-security related metadata of an IoT hub.
Get the non-security related metadata of an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.IotHubDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
resource_name: str,
iot_hub_description: "_models.IotHubDescription",
if_match: Optional[str] = None,
**kwargs
) -> "_models.IotHubDescription":
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if if_match is not None:
header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(iot_hub_description, 'IotHubDescription')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
resource_name: str,
iot_hub_description: "_models.IotHubDescription",
if_match: Optional[str] = None,
**kwargs
) -> AsyncLROPoller["_models.IotHubDescription"]:
"""Create or update the metadata of an IoT hub.
Create or update the metadata of an IoT hub. The usual pattern to modify a property is to
retrieve the IoT hub metadata and security metadata, and then combine them with the modified
values in a new body to update the IoT hub. If certain properties are missing in the JSON,
updating the IoT hub may cause these values to fall back to their defaults, which may lead to unexpected
behavior.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param iot_hub_description: The IoT hub metadata and security metadata.
:type iot_hub_description: ~azure.mgmt.iothub.v2017_07_01.models.IotHubDescription
:param if_match: ETag of the IoT Hub. Do not specify for creating a brand new IoT Hub. Required
to update an existing IoT Hub.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either IotHubDescription or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2017_07_01.models.IotHubDescription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
iot_hub_description=iot_hub_description,
if_match=if_match,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
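# Hypothetical caller-side sketch (not generated code): the long-running
# create/update operation above is typically consumed like this, assuming an
# authenticated IotHubClient instance named `client`:
#
#   poller = await client.iot_hub_resource.begin_create_or_update(
#       "my-resource-group", "my-hub", iot_hub_description)
#   hub = await poller.result()   # waits until provisioning completes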
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[Union["_models.IotHubDescription", "_models.ErrorDetails"]]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if response.status_code == 404:
deserialized = self._deserialize('ErrorDetails', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> AsyncLROPoller[Union["_models.IotHubDescription", "_models.ErrorDetails"]]:
"""Delete an IoT hub.
Delete an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either IotHubDescription or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothub.v2017_07_01.models.IotHubDescription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[Union["_models.IotHubDescription", "_models.ErrorDetails"]]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('IotHubDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}'} # type: ignore
def list_by_subscription(
self,
**kwargs
) -> AsyncIterable["_models.IotHubDescriptionListResult"]:
"""Get all the IoT hubs in a subscription.
Get all the IoT hubs in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_07_01.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IotHubDescriptionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/IotHubs'} # type: ignore
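# Hypothetical caller-side sketch (not generated code): the paged listing
# methods return an AsyncItemPaged, which is consumed with `async for`:
#
#   async for hub in client.iot_hub_resource.list_by_subscription():
#       print(hub.name, hub.location)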
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.IotHubDescriptionListResult"]:
"""Get all the IoT hubs in a resource group.
Get all the IoT hubs in a resource group.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubDescriptionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_07_01.models.IotHubDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IotHubDescriptionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs'} # type: ignore
async def get_stats(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> "_models.RegistryStatistics":
"""Get the statistics from an IoT hub.
Get the statistics from an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RegistryStatistics, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.RegistryStatistics
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RegistryStatistics"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self.get_stats.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('RegistryStatistics', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_stats.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubStats'} # type: ignore
def get_valid_skus(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> AsyncIterable["_models.IotHubSkuDescriptionListResult"]:
"""Get the list of valid SKUs for an IoT hub.
Get the list of valid SKUs for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubSkuDescriptionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_07_01.models.IotHubSkuDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubSkuDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_valid_skus.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IotHubSkuDescriptionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_valid_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/skus'} # type: ignore
def list_event_hub_consumer_groups(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
**kwargs
) -> AsyncIterable["_models.EventHubConsumerGroupsListResult"]:
"""Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an IoT hub.
Get a list of the consumer groups in the Event Hub-compatible device-to-cloud endpoint in an
IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint.
:type event_hub_endpoint_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either EventHubConsumerGroupsListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_07_01.models.EventHubConsumerGroupsListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupsListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_event_hub_consumer_groups.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('EventHubConsumerGroupsListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_event_hub_consumer_groups.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups'} # type: ignore
async def get_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs
) -> "_models.EventHubConsumerGroupInfo":
"""Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
Get a consumer group from the Event Hub-compatible device-to-cloud endpoint for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to retrieve.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self.get_event_hub_consumer_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
async def create_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs
) -> "_models.EventHubConsumerGroupInfo":
"""Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
Add a consumer group to an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to add.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: EventHubConsumerGroupInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.EventHubConsumerGroupInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.EventHubConsumerGroupInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self.create_event_hub_consumer_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.put(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('EventHubConsumerGroupInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
async def delete_event_hub_consumer_group(
self,
resource_group_name: str,
resource_name: str,
event_hub_endpoint_name: str,
name: str,
**kwargs
) -> None:
"""Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
Delete a consumer group from an Event Hub-compatible endpoint in an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param event_hub_endpoint_name: The name of the Event Hub-compatible endpoint in the IoT hub.
:type event_hub_endpoint_name: str
:param name: The name of the consumer group to delete.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self.delete_event_hub_consumer_group.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'eventHubEndpointName': self._serialize.url("event_hub_endpoint_name", event_hub_endpoint_name, 'str'),
'name': self._serialize.url("name", name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete_event_hub_consumer_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/eventHubEndpoints/{eventHubEndpointName}/ConsumerGroups/{name}'} # type: ignore
def list_jobs(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> AsyncIterable["_models.JobResponseListResult"]:
"""Get a list of all the jobs in an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get a list of all the jobs in an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either JobResponseListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_07_01.models.JobResponseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_jobs.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('JobResponseListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_jobs.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs'} # type: ignore
async def get_job(
self,
resource_group_name: str,
resource_name: str,
job_id: str,
**kwargs
) -> "_models.JobResponse":
"""Get the details of a job from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
Get the details of a job from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param job_id: The job identifier.
:type job_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self.get_job.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'jobId': self._serialize.url("job_id", job_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_job.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/jobs/{jobId}'} # type: ignore
def get_quota_metrics(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> AsyncIterable["_models.IotHubQuotaMetricInfoListResult"]:
"""Get the quota metrics for an IoT hub.
Get the quota metrics for an IoT hub.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotHubQuotaMetricInfoListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_07_01.models.IotHubQuotaMetricInfoListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubQuotaMetricInfoListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_quota_metrics.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('IotHubQuotaMetricInfoListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_quota_metrics.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/quotaMetrics'} # type: ignore
async def check_name_availability(
self,
operation_inputs: "_models.OperationInputs",
**kwargs
) -> "_models.IotHubNameAvailabilityInfo":
"""Check if an IoT hub name is available.
Check if an IoT hub name is available.
:param operation_inputs: Set the name parameter in the OperationInputs structure to the name of
the IoT hub to check.
:type operation_inputs: ~azure.mgmt.iothub.v2017_07_01.models.OperationInputs
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IotHubNameAvailabilityInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.IotHubNameAvailabilityInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotHubNameAvailabilityInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.check_name_availability.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(operation_inputs, 'OperationInputs')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('IotHubNameAvailabilityInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkNameAvailability'} # type: ignore
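# Hypothetical caller-side sketch (not generated code): the availability check
# takes an OperationInputs model (from azure.mgmt.iothub.models) carrying the
# candidate hub name; the constructor keyword `name` is assumed here:
#
#   result = await client.iot_hub_resource.check_name_availability(
#       OperationInputs(name="my-new-hub"))
#   print(result.name_available, result.message)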
def list_keys(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> AsyncIterable["_models.SharedAccessSignatureAuthorizationRuleListResult"]:
"""Get the security metadata for an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get the security metadata for an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedAccessSignatureAuthorizationRuleListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothub.v2017_07_01.models.SharedAccessSignatureAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_keys.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.post(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('SharedAccessSignatureAuthorizationRuleListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/listkeys'} # type: ignore
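# Hypothetical caller-side sketch (not generated code): shared access policies
# are listed page by page; an individual policy's keys can then be fetched by
# policy name, e.g. the built-in "iothubowner" policy:
#
#   async for rule in client.iot_hub_resource.list_keys("my-rg", "my-hub"):
#       print(rule.key_name, rule.rights)
#   owner = await client.iot_hub_resource.get_keys_for_key_name(
#       "my-rg", "my-hub", "iothubowner")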
async def get_keys_for_key_name(
self,
resource_group_name: str,
resource_name: str,
key_name: str,
**kwargs
) -> "_models.SharedAccessSignatureAuthorizationRule":
"""Get a shared access policy by name from an IoT hub. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
Get a shared access policy by name from an IoT hub. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-security.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param key_name: The name of the shared access policy.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedAccessSignatureAuthorizationRule, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.SharedAccessSignatureAuthorizationRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
accept = "application/json"
# Construct URL
url = self.get_keys_for_key_name.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
'keyName': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedAccessSignatureAuthorizationRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_keys_for_key_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/IotHubKeys/{keyName}/listkeys'} # type: ignore
async def export_devices(
self,
resource_group_name: str,
resource_name: str,
export_devices_parameters: "_models.ExportDevicesRequest",
**kwargs
) -> "_models.JobResponse":
"""Exports all the device identities in the IoT hub identity registry to an Azure Storage blob container. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Exports all the device identities in the IoT hub identity registry to an Azure Storage blob
container. For more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param export_devices_parameters: The parameters that specify the export devices operation.
:type export_devices_parameters: ~azure.mgmt.iothub.v2017_07_01.models.ExportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.export_devices.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(export_devices_parameters, 'ExportDevicesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
export_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/exportDevices'} # type: ignore
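    # Illustrative call sketch (not generated code): field names follow the
    # ExportDevicesRequest model; the resource names and container SAS URI are placeholders.
    #
    #     job = await client.iot_hub_resource.export_devices(
    #         "my-rg", "my-hub",
    #         _models.ExportDevicesRequest(
    #             export_blob_container_uri="https://account.blob.core.windows.net/export?sv=...",
    #             exclude_keys=False))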
async def import_devices(
self,
resource_group_name: str,
resource_name: str,
import_devices_parameters: "_models.ImportDevicesRequest",
**kwargs
) -> "_models.JobResponse":
"""Import, update, or delete device identities in the IoT hub identity registry from a blob. For more information, see: https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
Import, update, or delete device identities in the IoT hub identity registry from a blob. For
more information, see:
https://docs.microsoft.com/azure/iot-hub/iot-hub-devguide-identity-registry#import-and-export-device-identities.
:param resource_group_name: The name of the resource group that contains the IoT hub.
:type resource_group_name: str
:param resource_name: The name of the IoT hub.
:type resource_name: str
:param import_devices_parameters: The parameters that specify the import devices operation.
:type import_devices_parameters: ~azure.mgmt.iothub.v2017_07_01.models.ImportDevicesRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResponse, or the result of cls(response)
:rtype: ~azure.mgmt.iothub.v2017_07_01.models.JobResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.JobResponse"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-07-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.import_devices.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(import_devices_parameters, 'ImportDevicesRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('JobResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/IotHubs/{resourceName}/importDevices'} # type: ignore
| [
"[email protected]"
] | |
f626569e98ae081d24c8713a307a06dba8355c47 | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/surface/netapp/volumes/snapshots/describe.py | bbd36f4ef5454bc19ac3d530aa4c3c5ee430b40d | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 2,651 | py | # -*- coding: utf-8 -*- #
# Copyright 2022 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Describe a Cloud NetApp Volume Snapshot."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.netapp.volumes.snapshots import client as snapshots_client
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.netapp import flags
from googlecloudsdk.command_lib.netapp.volumes.snapshots import flags as snapshots_flags
from googlecloudsdk.command_lib.util.concepts import concept_parsers
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Describe(base.DescribeCommand):
"""Describe a Cloud NetApp Volume Snapshot."""
_RELEASE_TRACK = base.ReleaseTrack.GA
detailed_help = {
'DESCRIPTION': """\
Describe a Cloud NetApp Volume Snapshot.
""",
'EXAMPLES': """\
The following command describes a Snapshot named NAME in the given location and volume:
$ {command} NAME --location=us-central1 --volume=vol1
""",
}
@staticmethod
def Args(parser):
concept_parsers.ConceptParser([flags.GetSnapshotPresentationSpec(
'The Snapshot to describe.')]).AddToParser(parser)
snapshots_flags.AddSnapshotVolumeArg(parser)
def Run(self, args):
"""Get a Cloud NetApp Volume Snapshot in the current project."""
snapshot_ref = args.CONCEPTS.snapshot.Parse()
if args.CONCEPTS.volume.Parse() is None:
raise exceptions.RequiredArgumentException(
'--volume', 'Requires a volume to describe snapshot of')
client = snapshots_client.SnapshotsClient(release_track=self._RELEASE_TRACK)
return client.GetSnapshot(snapshot_ref)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class DescribeBeta(Describe):
"""Describe a Cloud NetApp Volume Snapshot."""
_RELEASE_TRACK = base.ReleaseTrack.BETA
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class DescribeAlpha(DescribeBeta):
"""Describe a Cloud NetApp Volume Snapshot."""
_RELEASE_TRACK = base.ReleaseTrack.ALPHA
| [
"[email protected]"
] | |
1f258b67de069bd06008aaeaf03c969cf81ea192 | 9bdc868dbc3910ae72a05ab66cf53d30dffab2a8 | /test/functional/p2p_zpos_fakestake.py | b572c1e97eb8fd15a6ab5dbf088628530f98b212 | [
"MIT"
] | permissive | YEPCOIN/Yep-Core | 6aa8a3750e8496509501b7ff4d663a2681854c96 | 541ada7485b28abe1429c400835ce228ca9a6903 | refs/heads/master | 2020-07-03T04:44:44.361866 | 2020-05-06T19:45:05 | 2020-05-06T19:45:05 | 201,787,182 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,016 | py | #!/usr/bin/env python3
# Copyright (c) 2019 The Yep Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend
of an already spent coin.
'''
from time import sleep
from test_framework.authproxy import JSONRPCException
from fake_stake.base_test import yep_FakeStakeTest
class zPoSFakeStake(yep_FakeStakeTest):
def run_test(self):
self.description = "Covers the scenario of a zPoS block where the coinstake input is a zerocoin spend of an already spent coin."
self.init_test()
DENOM_TO_USE = 5000 # zc denomination
INITAL_MINED_BLOCKS = 321 # First mined blocks (rewards collected to mint)
MORE_MINED_BLOCKS = 301 # More blocks mined before spending zerocoins
self.NUM_BLOCKS = 2 # Number of spammed blocks
# 1) Starting mining blocks
self.log.info("Mining %d blocks to get to zPOS activation...." % INITAL_MINED_BLOCKS)
self.node.generate(INITAL_MINED_BLOCKS)
sleep(2)
# 2) Collect the possible prevouts and mint zerocoins with those
self.log.info("Collecting all unspent coins which we generated from mining...")
balance = self.node.getbalance("*", 100)
self.log.info("Minting zerocoins...")
initial_mints = 0
while balance > DENOM_TO_USE:
try:
self.node.mintzerocoin(DENOM_TO_USE)
except JSONRPCException:
break
sleep(1)
initial_mints += 1
self.node.generate(1)
sleep(1)
if initial_mints % 5 == 0:
self.log.info("Minted %d coins" % initial_mints)
if initial_mints >= 70:
break
balance = self.node.getbalance("*", 100)
self.log.info("Minted %d coins in the %d-denom, remaining balance %d", initial_mints, DENOM_TO_USE, balance)
sleep(2)
# 3) mine more blocks
self.log.info("Mining %d more blocks ... and getting spendable zerocoins" % MORE_MINED_BLOCKS)
self.node.generate(MORE_MINED_BLOCKS)
sleep(2)
mints = self.node.listmintedzerocoins(True, True)
mints_hashes = [x["serial hash"] for x in mints]
        # These mints are not all spendable yet; only a few of them are.
self.log.info("Got %d confirmed mints" % len(mints_hashes))
# 4) spend mints
self.log.info("Spending mints in block %d..." % self.node.getblockcount())
spends = 0
spent_mints = []
for mint in mints_hashes:
# create a single element list to pass to RPC spendzerocoinmints
mint_arg = []
mint_arg.append(mint)
try:
self.node.spendzerocoinmints(mint_arg)
sleep(1)
spends += 1
spent_mints.append(mint)
except JSONRPCException as e:
self.log.warning(str(e))
continue
sleep(1)
self.log.info("Successfully spent %d mints" % spends)
# 5) Start mining again so that spends get confirmed in a block.
self.log.info("Mining 5 more blocks...")
self.node.generate(5)
sleep(2)
# 6) Collect some prevouts for random txes
self.log.info("Collecting inputs for txes...")
spending_utxo_list = self.node.listunspent()
sleep(1)
# 7) Create "Fake Stake" blocks and send them
self.log.info("Creating Fake stake zPoS blocks...")
err_msgs = self.test_spam("Main", mints, spending_utxo_list=spending_utxo_list, fZPoS=True)
if not len(err_msgs) == 0:
self.log.error("result: " + " | ".join(err_msgs))
raise AssertionError("TEST FAILED")
self.log.info("%s PASSED" % self.__class__.__name__)
if __name__ == '__main__':
zPoSFakeStake().main()
| [
"[email protected]"
] | |
cc5ea903908b10ba307f2cedf7ce37fb78ad1004 | 3545ee160458acac7452666aa07826b58e144351 | /demo/text_recognition/rflearning/configs/rfl_res32_attn.py | 7f5925689be5d6a7f4dda0f6a30debf79ae900a1 | [
"Apache-2.0"
] | permissive | OCRWorld/DAVAR-Lab-OCR | 7cc81af43a0e8f60066e7761d950f509c40cfd46 | fb47a96d1a38f5ce634c6f12d710ed5300cc89fc | refs/heads/main | 2023-08-29T09:41:19.377628 | 2021-11-08T11:16:37 | 2021-11-08T11:16:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,809 | py | """
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : rfl_res32_attn.py
# Abstract : RF-learning recognition Model
# Current Version: 1.0.0
# Date : 2021-06-11
##################################################################################################
"""
# encoding=utf-8
_base_ = [
'./baseline.py'
]
# recognition dictionary
character = "/data1/open-source/demo/text_recognition/__dictionary__/Scene_text_36.txt"
"""
1. Model Settings
    include model-related settings, such as model type, user-selected modules and parameters.
"""
# model parameters for changing the rf-learning
model = dict(
type='RFLRecognizor',
transformation=dict(
type='TPS_SpatialTransformer',
F=20,
I_size=(32, 100),
I_r_size=(32, 100),
I_channel_num=1,
),
backbone=dict(
type='ResNetRFL',
input_channel=1,
output_channel=512,),
# neck_s2v=None, # Training strategy
# neck_v2s=None, # Step1: training total RF-Learning, train_type="visual",
neck_v2s=dict( # Step2: training total RF-Learning, train_type="total",
type='V2SAdaptor', # neck_v2s=V2SAdaptor, neck_s2v=S2VAdaptor
in_channels=512,),
neck_s2v=dict(
type='S2VAdaptor',
in_channels=512,),
counting_head=dict(
type='CNTHead',
embed_size=512,
encode_length=26,
loss_count=dict(
type="MSELoss",
reduction='mean'),
converter=dict(
type='RFLLabelConverter',
character=character, ),),
sequence_head=dict(
type='AttentionHead',
input_size=512,
hidden_size=256,
batch_max_length=25,
converter=dict(
type='AttnLabelConverter',
character=character,
use_cha_eos=True, ),
loss_att=dict(
type='StandardCrossEntropyLoss',
ignore_index=0,
reduction='mean',
loss_weight=1.0),),
# train_type="visual",
train_type="total",
_delete_=True
# Step1: train_type="visual"
# Step2: train_type="semantic",
# Step3: train_type="total"
)
"""
2. Data Setting
description:
Pipeline and training dataset settings
Add keywords:
None
"""
# dataset settings
# supported dataset types and their image-loading pipeline classes
ppld = {
'LMDB_Standard': 'LoadImageFromLMDB', # open-source LMDB data
# Davar dataset type
'LMDB_Davar': 'RCGLoadImageFromLMDB',
'File': 'RCGLoadImageFromFile',
'Loose': 'RCGLoadImageFromLoose',
'Tight': 'RCGLoadImageFromTight',
}
"""
Dataset Instruction manual:
data_types=['LMDB','File','Tight','File'] # corresponding to different data type
ann_files = ['train1|train2|train3',
'Datalist/train1.json|Datalist/train2.json',
'Datalist/train_xxx.json',
'Datalist/train_yyy.json'] # Separated by '|'
img_prefixes = ['xx/yy/zz/|aa/bb/cc/|mm/nn/',
'dd/ee/', 'ff/gg/hh/',
'ii/jj/kk/'] # Separated by '|', corresponding to the ann_files
batch_ratios = ['0.1|0.1|0.1',
'0.2|0.2',
'0.1',
'0.2'] # string format, corresponding to the ann_files
# sum of the batch_ratios equals to 1
"""
# Training dataset format
data_types = [
'LMDB_Standard',
'LMDB_Standard'
]
# File prefix path of the training dataset
img_prefixes = [
'*****/TextRecognition/LMDB/BenchEn/train/', # path to the training dataset
'*****/TextRecognition/LMDB/BenchEn/train/', # path to the training dataset
]
# Dataset Name
ann_files = [
'MJ', 'SK'
]
# Training dataset load type
dataset_type = 'DavarMultiDataset'
# Normalization parameter
img_norm_cfg = dict(
mean=[127.5],
std=[127.5])
# training pipeline parameter
train_pipelines = [
dict(
type=ppld["LMDB_Standard"],
character=character, # recognition dictionary
test_mode=False, # whether is in test mode
sensitive=False, # sensitive to Upper or Lower
color_types=["gray"], # color loading type, ["rgb", "bgr", "gray"]
fil_ops=True,
),
dict(
type='ResizeNormalize',
size=(100, 32),
interpolation=2,
# Interpolation method of the Resize function
# 0 - INTER_NEAREST(default) # 1 - INTER_LINEAR
# 2 - INTER_CUBIC # 3 - INTER_AREA
mean=img_norm_cfg["mean"],
std=img_norm_cfg["std"], ),
dict(type='DavarDefaultFormatBundle'), # Uniform Training data tensor format
dict(type='DavarCollect', keys=['img', 'gt_text']), # Data content actually involved in training stage
]
print('train_pipelines:', train_pipelines)
val_pipeline = [
dict(type=ppld["LMDB_Standard"],
character=character,
test_mode=True,
sensitive=False,
color_types=["gray"], # color loading type, ["rgb", "bgr", "gray"]
fil_ops=True, ),
dict(type='ResizeNormalize',
size=(100, 32),
interpolation=2,
mean=img_norm_cfg["mean"],
std=img_norm_cfg["std"],
),
dict(type='DavarDefaultFormatBundle'),
dict(type='DavarCollect', keys=['img', 'gt_text'], meta_keys=[]),
]
test_pipeline = [
dict(type=ppld["LMDB_Standard"],
character=character,
test_mode=True,
sensitive=False,
color_types=["gray"],
fil_ops=True, ),
dict(type='ResizeNormalize',
size=(100, 32),
interpolation=2,
mean=img_norm_cfg["mean"],
std=img_norm_cfg["std"],
),
dict(type='DavarDefaultFormatBundle'),
dict(type='DavarCollect', keys=['img'], meta_keys=[]),
]
data = dict(
samples_per_gpu=128, # batchsize=100->memory:6400M
workers_per_gpu=2,
sampler=dict(
type='DistBatchBalancedSampler', # BatchBalancedSampler or DistBatchBalancedSampler
mode=0,
        # mode 0: balance within the batch; epoch length follows the first dataset iterated
        # mode 1: balance within the batch; epoch length follows the last dataset iterated
        # mode 2: balance within the batch; keep a record of unused data
        # mode -1: datasets are simply concatenated and shuffled
),
train=dict(
type=dataset_type,
batch_ratios=['0.5', '0.5'],
dataset=dict(
type="DavarRCGDataset",
data_type=data_types,
ann_file=ann_files,
img_prefix=img_prefixes,
batch_max_length=25,
used_ratio=1,
test_mode=False,
pipeline=train_pipelines)
),
val=dict(
type=dataset_type,
batch_ratios=1,
samples_per_gpu=400,
test_mode=True,
dataset=dict(
type="DavarRCGDataset",
data_type="LMDB_Standard",
ann_file='mixture',
img_prefix='/path/to/validation/',
batch_max_length=25,
used_ratio=1,
test_mode=True,
pipeline=val_pipeline,)
),
test=dict(
type=dataset_type,
batch_ratios=1,
test_mode=True,
dataset=dict(
type="DavarRCGDataset",
data_type='LMDB_Standard',
ann_file='IIIT5k_3000',
img_prefix='/path/to/evaluation/',
batch_max_length=25,
used_ratio=1,
test_mode=True,
pipeline=test_pipeline, ),
)
)
"""
4. Runtime Settings
include information about checkpoint, logging, evaluation, workflow,
pretrained models and other defined parameters during runtime.
"""
checkpoint_config = dict(type="DavarCheckpointHook",
interval=1,
iter_interval=5000,
by_epoch=True,
by_iter=True,
filename_tmpl='ckpt/res32_ace_e{}.pth',
metric="accuracy",
rule="greater",
save_mode="lightweight",
init_metric=-1,
model_milestone=0.5
)
# logger setting
log_config = dict(
interval=50,
hooks=[dict(type='TextLoggerHook'), ])
evaluation = dict(start=3,
start_iter=0.5,
save_best="accuracy",
interval=1,
iter_interval=5000,
model_type="RECOGNIZOR",
eval_mode="lightweight",
by_epoch=True,
by_iter=True,
rule="greater",
metric=['accuracy', 'NED'],
)
# evaluation = dict(type="DavarDistEvalHook",
# interval=1,
# model_type="recognizor",
# save_best="accuracy",
# eval_mode="general",
# by_epoch=True,
# rule="greater",
# metric=['accuracy', 'NED'],
# )
# runner setting
runner = dict(type='EpochBasedRunner', max_epochs=6)
# must specify this parameter
find_unused_parameters = True
# Load from Pre-trained model path
load_from = '/path/to/davar_opensource/rflearning_visual/RFL_visual_pretrained-2654bc6b.pth'
# work directory
work_dir = '/path/to/davar_opensource/rflearning_total/'
# distributed training setting
dist_params = dict(backend='nccl')
| [
"[email protected]"
] | |
a3747ed815403f95d7732066115c2c6a00eb89b8 | 17079988dedef6f830633a7a54b181355231fe3e | /Car/Car3.py | db4c58f81a811a3cd44229bac7dcc8e68dec6f07 | [] | no_license | sum008/python-backup | cdf6eaff60d882c36fe86b47ad311955d5869b02 | 729fbe2a5220941f9ba085c693c871592a529da8 | refs/heads/master | 2022-12-12T21:21:48.259680 | 2020-09-12T15:36:05 | 2020-09-12T15:36:05 | 285,461,845 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,812 | py | import pygame as p
import vector_
import math
p.init()
width=600
height=600
display=p.display.set_mode((width,height))
image=p.image.load("car.png")
run=True
velocity=vector_.vector_functions()
x=200
y=200
l=0
position=vector_.vector_functions()
position.create(x, y)
velocity.set_length(l)
position.add_to_xy(velocity)
accelerate = vector_.vector_functions()
accelerate.create(0.6, 0.6)
friction = 0.98
angle=0
image=p.transform.scale(image, (30, 55))
image=p.transform.rotate(image, -270)
acc=0.5
deacc=3
lastangle=angle
move="null"
last_dir="null"
while run:
display.fill((0,150,0))
angle_rot = velocity.get_angle()
img=p.transform.rotate(image, angle)
getrect=img.get_rect()
getrect.center=(position.getX()%width,position.getY()%height)
display.blit(img,getrect)
velocity.set_angle(lastangle)
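    # Added note: the block below derives a small drift vector c from the last turn
    # direction (the +/-100 offsets collapse to a fixed diagonal once normalized and
    # scaled by 0.9) and folds it into the position update through vel2.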
b=p.Vector2(0,0)
a=p.Vector2(velocity.getX(),velocity.getY())
if last_dir=="r":
b=p.Vector2(velocity.getX()+100,velocity.getY()+100)
elif last_dir=="l":
b=p.Vector2(velocity.getX()-100,velocity.getY()-100)
c=a-b
if c[0]!=0 or c[1]!=0:
c=c.normalize()*0.9
print(c)
vel2=vector_.vector_functions()
vel2.create(velocity.getX()+c[0], velocity.getY()+c[1])
position.add_to_xy(vel2)
for event in p.event.get():
if event.type==p.QUIT:
run=False
keys=p.key.get_pressed()
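    # Added note: 0.0174 ~= pi/180, so the sprite angle kept in degrees is converted to
    # radians for the velocity vector; math.pi - angle*0.0174 points the velocity the
    # opposite way and is used while reversing.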
if keys[p.K_LEFT] and abs(velocity.get_length())>0.75:
angle=(angle+1)%360
lastangle=-angle*0.0174
if move=="r":
lastangle=math.pi-(angle*0.0174)
else:
lastangle=-angle*0.0174
last_dir="l"
if keys[p.K_RIGHT] and abs(velocity.get_length())>0.75:
angle=(angle-1)%360
if move=="r":
lastangle=math.pi-(angle*0.0174)
else:
lastangle=-angle*0.0174
last_dir="r"
if keys[p.K_UP]: #Accelerate
if(velocity.get_length()<10):
velocity.set_length(velocity.get_length()+acc)
lastangle=-angle*0.0174
move="null"
# if keys[p.K_DOWN] and velocity.get_length()>0.75: #Brakes
# velocity.set_length(velocity.get_length()-acc)
# lastangle=-(angle*0.0174)
# move="null"
if keys[p.K_DOWN]:# and velocity.get_length()<=0:
velocity.set_length(velocity.get_length()-deacc)
lastangle=math.pi-(angle*0.0174)
move="r"
if velocity.get_length()<0.5:
velocity.set_length(0)
last_dir="null"
velocity.set_length(velocity.get_length()*friction)
print(position.getX(),angle,getrect)
p.display.flip()
p.time.Clock().tick(60)
| [
"[email protected]"
] | |
12625896c8c00d5dc56d6d965956ae0ad0522e0b | 95269decf471db39653d12c8592b0b30f2ed7a5d | /tensorflow/python/layers/core.py | bdbbc59eaf05e1f6286340b544085a86c4bcd0bb | [
"Apache-2.0"
] | permissive | alvarofernandoms/tensorflow | 99679dc36ecb61354e24b5b48b914446800b83f3 | 972a454f47a94fe263c7a889603bbbb36f16c32c | refs/heads/master | 2020-04-10T13:07:28.654916 | 2018-03-07T21:36:07 | 2018-03-07T21:36:07 | 124,273,027 | 0 | 0 | Apache-2.0 | 2018-03-07T17:49:18 | 2018-03-07T17:49:17 | null | UTF-8 | Python | false | false | 16,513 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import standard_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export('layers.Dense')
class Dense(base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Dense, self).__init__(trainable=trainable, name=name,
activity_regularizer=activity_regularizer,
**kwargs)
self.units = units
self.activation = activation
self.use_bias = use_bias
self.kernel_initializer = kernel_initializer
self.bias_initializer = bias_initializer
self.kernel_regularizer = kernel_regularizer
self.bias_regularizer = bias_regularizer
self.kernel_constraint = kernel_constraint
self.bias_constraint = bias_constraint
self.input_spec = base.InputSpec(min_ndim=2)
def build(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
if input_shape[-1].value is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
self.input_spec = base.InputSpec(min_ndim=2,
axes={-1: input_shape[-1].value})
self.kernel = self.add_variable('kernel',
shape=[input_shape[-1].value, self.units],
initializer=self.kernel_initializer,
regularizer=self.kernel_regularizer,
constraint=self.kernel_constraint,
dtype=self.dtype,
trainable=True)
if self.use_bias:
self.bias = self.add_variable('bias',
shape=[self.units,],
initializer=self.bias_initializer,
regularizer=self.bias_regularizer,
constraint=self.bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self.bias = None
self.built = True
def call(self, inputs):
inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
shape = inputs.get_shape().as_list()
if len(shape) > 2:
# Broadcasting is required for the inputs.
outputs = standard_ops.tensordot(inputs, self.kernel, [[len(shape) - 1],
[0]])
# Reshape the output back to the original ndim of the input.
if context.in_graph_mode():
output_shape = shape[:-1] + [self.units]
outputs.set_shape(output_shape)
else:
outputs = gen_math_ops.mat_mul(inputs, self.kernel)
if self.use_bias:
outputs = nn.bias_add(outputs, self.bias)
if self.activation is not None:
return self.activation(outputs) # pylint: disable=not-callable
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape)
input_shape = input_shape.with_rank_at_least(2)
if input_shape[-1].value is None:
raise ValueError(
'The innermost dimension of input_shape must be defined, but saw: %s'
% input_shape)
return input_shape[:-1].concatenate(self.units)
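# Usage sketch (illustrative, not part of the library source); names and shapes below
# are example assumptions:
#
#     x = tf.placeholder(shape=(None, 128), dtype='float32')
#     layer = Dense(units=64, activation=tf.nn.relu)
#     y = layer.apply(x)   # `y` has shape `(None, 64)`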
@tf_export('layers.dense')
def dense(
inputs, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the densely-connected layer.
This layer implements the operation:
`outputs = activation(inputs.kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Arguments:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor the same shape as `inputs` except the last dimension is of
size `units`.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Dense(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
dtype=inputs.dtype.base_dtype,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
@tf_export('layers.Dropout')
class Dropout(base.Layer):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}.
for behavior.
name: The name of the layer (string).
"""
def __init__(self, rate=0.5,
noise_shape=None,
seed=None,
name=None,
**kwargs):
super(Dropout, self).__init__(name=name, **kwargs)
self.rate = rate
self.noise_shape = noise_shape
self.seed = seed
def _get_noise_shape(self, inputs):
# Subclasses of `Dropout` may implement `_get_noise_shape(self, inputs)`,
# which will override `self.noise_shape`, and allows for custom noise
# shapes with dynamically sized inputs.
if self.noise_shape is None:
return self.noise_shape
return nn_ops._get_noise_shape(inputs, self.noise_shape)
def call(self, inputs, training=False):
def dropped_inputs():
return nn.dropout(inputs, 1 - self.rate,
noise_shape=self._get_noise_shape(inputs),
seed=self.seed)
return utils.smart_cond(training,
dropped_inputs,
lambda: array_ops.identity(inputs))
def compute_output_shape(self, input_shape):
return input_shape
@tf_export('layers.dropout')
def dropout(inputs,
rate=0.5,
noise_shape=None,
seed=None,
training=False,
name=None):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
inputs: Tensor input.
rate: The dropout rate, between 0 and 1. E.g. "rate=0.1" would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(apply dropout) or in inference mode (return the input untouched).
name: The name of the layer (string).
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
return layer.apply(inputs, training=training)
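# Usage sketch (illustrative): dropout is applied only when `training` is True or a
# True-valued tensor; at inference the input passes through unchanged. `x` and
# `is_training` are assumed placeholders, not names from this module.
#
#     h = tf.layers.dense(x, 256, activation=tf.nn.relu)
#     h = tf.layers.dropout(h, rate=0.4, training=is_training)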
@tf_export('layers.Flatten')
class Flatten(base.Layer):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Examples:
```
x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, 16)`
x = tf.placeholder(shape=(None, 3, None), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, None)`
```
"""
def __init__(self, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.input_spec = base.InputSpec(min_ndim=2)
def call(self, inputs):
outputs = array_ops.reshape(inputs, (array_ops.shape(inputs)[0], -1))
if context.in_graph_mode():
outputs.set_shape(self.compute_output_shape(inputs.get_shape()))
return outputs
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
output_shape = [input_shape[0]]
if all(input_shape[1:]):
output_shape += [np.prod(input_shape[1:])]
else:
output_shape += [None]
return tensor_shape.TensorShape(output_shape)
@tf_export('layers.flatten')
def flatten(inputs, name=None):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Arguments:
inputs: Tensor input.
name: The name of the layer (string).
Returns:
Reshaped tensor.
Examples:
```
x = tf.placeholder(shape=(None, 4, 4), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, 16)`
x = tf.placeholder(shape=(None, 3, None), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, None)`
```
"""
layer = Flatten(name=name)
return layer.apply(inputs)
# Aliases
FullyConnected = Dense
fully_connected = dense
| [
"[email protected]"
] | |
ac56f5e8e9874e1a72c9f0a01d547345569ccffd | 2c1a2724d4e1edfd99597ef700624650de7ed5b6 | /amazon_cells_labelled.py | a79c25b69e01af09a06da35634be9b2c0c26803b | [] | no_license | hellosandeep1999/Machine_Learning | d91dd3b2930fef69cc1c6b6409b6591c4b8ca2e7 | 20b6296009c2a7844ad8d06d3e43b53b30a4b450 | refs/heads/master | 2022-10-11T17:52:50.610155 | 2020-06-08T02:53:02 | 2020-06-08T02:53:02 | 257,350,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,762 | py | # -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:07:17 2020
@author: user
"""
"""
Q1. Code Challenge (NLP)
Dataset: amazon_cells_labelled.txt
The Data has sentences from Amazon Reviews
Each line in Data Set is tagged positive or negative
Create a Machine learning model using Natural Language Processing that can
predict whether a given review about the product is positive or negative
"""
import pandas as pd
# Importing the dataset
# To ignore double quotes, pass quoting = 3 (csv.QUOTE_NONE) to read_csv
dataset = pd.read_csv('amazon_cells_labelled.txt', delimiter = '\t')
dataset.columns = ["sms","label"]
import nltk
# one-time download of the NLTK stopwords corpus (uncomment on the first run)
#nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import re
corpus = []
for i in range(0, 999):
sms = re.sub('[^a-zA-Z]', ' ', dataset['sms'][i])
sms = sms.lower()
sms = sms.split()
sms = [word for word in sms if not word in set(stopwords.words('english'))]
ps = PorterStemmer()
sms = [ps.stem(word) for word in sms]
sms = ' '.join(sms)
corpus.append(sms)
print(corpus)
print(len(corpus))
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(max_features = 2000)
# fit_transform returns a sparse document-term matrix; .toarray() converts it to a dense feature array
features = cv.fit_transform(corpus).toarray() # 2000 columns
labels = dataset.iloc[:, 1].values
print(features.shape)
print(labels.shape)
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
features_train, features_test, labels_train, labels_test = \
train_test_split(features, labels, test_size = 0.20, random_state = 0)
#applying knn on this text dataset
# Fitting Knn to the Training set
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier()
classifier.fit(features_train, labels_train)
# Predicting the Test set results
labels_pred = classifier.predict(features_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm_knn = confusion_matrix(labels_test, labels_pred)
print(cm_knn) #0.72
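# Added sketch: accuracy is the trace of the confusion matrix divided by the number
# of test samples; accuracy_score on the same predictions gives the ~0.72 noted above.
from sklearn.metrics import accuracy_score
print("KNN accuracy:", accuracy_score(labels_test, labels_pred))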
# for better NLP results we need a lot more data
# -----------------------------------------------------------------------
# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(features_train, labels_train)
# Predicting the Test set results
labels_pred = classifier.predict(features_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm_nb = confusion_matrix(labels_test, labels_pred)
print(cm_nb) #0.72
# i.e. Naive Bayes and K-Nearest Neighbors reach the same accuracy on this data
| [
"[email protected]"
] | |
15d56b6e629ac4c09ec545de90e42842dda401f5 | 55dd731810dfae13bad4ffa9ddf415dc57c6dc19 | /projects/DensePose/densepose/densepose_head.py | e2c499b667fce44cb044a29766257bbc228d1909 | [
"Apache-2.0"
] | permissive | neurolaboratories/detectron2 | cf87081a2cd44bb90a93b194e5e424ac0c55c7b7 | e36814661e13ffcbb5900e8e3a1b857c78dc24fa | refs/heads/master | 2022-12-11T15:24:19.036952 | 2020-09-14T12:39:31 | 2020-09-14T12:39:31 | 291,991,620 | 12 | 0 | Apache-2.0 | 2020-09-14T12:39:32 | 2020-09-01T12:27:44 | Python | UTF-8 | Python | false | false | 57,774 | py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import math
from dataclasses import dataclass
from enum import Enum
from typing import Iterable, List, Optional, Tuple
import fvcore.nn.weight_init as weight_init
import torch
from torch import nn
from torch.nn import functional as F
from detectron2.config import CfgNode
from detectron2.layers import Conv2d, ConvTranspose2d, interpolate
from detectron2.structures import Instances
from detectron2.structures.boxes import matched_boxlist_iou
from detectron2.utils.registry import Registry
from .data.structures import DensePoseOutput
ROI_DENSEPOSE_HEAD_REGISTRY = Registry("ROI_DENSEPOSE_HEAD")
class DensePoseUVConfidenceType(Enum):
"""
Statistical model type for confidence learning, possible values:
- "iid_iso": statistically independent identically distributed residuals
with anisotropic covariance
- "indep_aniso": statistically independent residuals with anisotropic
covariances
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
# fmt: off
IID_ISO = "iid_iso"
INDEP_ANISO = "indep_aniso"
# fmt: on
@dataclass
class DensePoseUVConfidenceConfig:
"""
Configuration options for confidence on UV data
"""
enabled: bool = False
# lower bound on UV confidences
epsilon: float = 0.01
type: DensePoseUVConfidenceType = DensePoseUVConfidenceType.IID_ISO
@dataclass
class DensePoseSegmConfidenceConfig:
"""
Configuration options for confidence on segmentation
"""
enabled: bool = False
# lower bound on confidence values
epsilon: float = 0.01
@dataclass
class DensePoseConfidenceModelConfig:
"""
Configuration options for confidence models
"""
# confidence for U and V values
uv_confidence: DensePoseUVConfidenceConfig
# segmentation confidence
segm_confidence: DensePoseSegmConfidenceConfig
@staticmethod
def from_cfg(cfg: CfgNode) -> "DensePoseConfidenceModelConfig":
return DensePoseConfidenceModelConfig(
uv_confidence=DensePoseUVConfidenceConfig(
enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.ENABLED,
epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON,
type=DensePoseUVConfidenceType(cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE),
),
segm_confidence=DensePoseSegmConfidenceConfig(
enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.ENABLED,
epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON,
),
)
def initialize_module_params(module):
for name, param in module.named_parameters():
if "bias" in name:
nn.init.constant_(param, 0)
elif "weight" in name:
nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
@ROI_DENSEPOSE_HEAD_REGISTRY.register()
class DensePoseDeepLabHead(nn.Module):
def __init__(self, cfg, input_channels):
super(DensePoseDeepLabHead, self).__init__()
# fmt: off
hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM
self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
self.use_nonlocal = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON
# fmt: on
pad_size = kernel_size // 2
n_channels = input_channels
self.ASPP = ASPP(input_channels, [6, 12, 56], n_channels) # 6, 12, 56
self.add_module("ASPP", self.ASPP)
if self.use_nonlocal:
self.NLBlock = NONLocalBlock2D(input_channels, bn_layer=True)
self.add_module("NLBlock", self.NLBlock)
# weight_init.c2_msra_fill(self.ASPP)
for i in range(self.n_stacked_convs):
norm_module = nn.GroupNorm(32, hidden_dim) if norm == "GN" else None
layer = Conv2d(
n_channels,
hidden_dim,
kernel_size,
stride=1,
padding=pad_size,
bias=not norm,
norm=norm_module,
)
weight_init.c2_msra_fill(layer)
n_channels = hidden_dim
layer_name = self._get_layer_name(i)
self.add_module(layer_name, layer)
self.n_out_channels = hidden_dim
# initialize_module_params(self)
def forward(self, features):
x0 = features
x = self.ASPP(x0)
if self.use_nonlocal:
x = self.NLBlock(x)
output = x
for i in range(self.n_stacked_convs):
layer_name = self._get_layer_name(i)
x = getattr(self, layer_name)(x)
x = F.relu(x)
output = x
return output
def _get_layer_name(self, i: int):
layer_name = "body_conv_fcn{}".format(i + 1)
return layer_name
# Copied from
# https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py
# See https://arxiv.org/pdf/1706.05587.pdf for details
class ASPPConv(nn.Sequential):
def __init__(self, in_channels, out_channels, dilation):
modules = [
nn.Conv2d(
in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False
),
nn.GroupNorm(32, out_channels),
nn.ReLU(),
]
super(ASPPConv, self).__init__(*modules)
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels):
super(ASPPPooling, self).__init__(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.GroupNorm(32, out_channels),
nn.ReLU(),
)
def forward(self, x):
size = x.shape[-2:]
x = super(ASPPPooling, self).forward(x)
return F.interpolate(x, size=size, mode="bilinear", align_corners=False)
class ASPP(nn.Module):
def __init__(self, in_channels, atrous_rates, out_channels):
super(ASPP, self).__init__()
modules = []
modules.append(
nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1, bias=False),
nn.GroupNorm(32, out_channels),
nn.ReLU(),
)
)
rate1, rate2, rate3 = tuple(atrous_rates)
modules.append(ASPPConv(in_channels, out_channels, rate1))
modules.append(ASPPConv(in_channels, out_channels, rate2))
modules.append(ASPPConv(in_channels, out_channels, rate3))
modules.append(ASPPPooling(in_channels, out_channels))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
# nn.BatchNorm2d(out_channels),
nn.ReLU()
# nn.Dropout(0.5)
)
def forward(self, x):
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
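# Shape note (illustrative): every ASPP branch preserves spatial size (the pooling
# branch is upsampled back), so an input of shape (N, C_in, H, W) comes out as
# (N, out_channels, H, W); e.g. ASPP(512, [6, 12, 56], 512) maps a (2, 512, 28, 28)
# tensor to (2, 512, 28, 28).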
# copied from
# https://github.com/AlexHex7/Non-local_pytorch/blob/master/lib/non_local_embedded_gaussian.py
# See https://arxiv.org/abs/1711.07971 for details
class _NonLocalBlockND(nn.Module):
def __init__(
self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True
):
super(_NonLocalBlockND, self).__init__()
assert dimension in [1, 2, 3]
self.dimension = dimension
self.sub_sample = sub_sample
self.in_channels = in_channels
self.inter_channels = inter_channels
if self.inter_channels is None:
self.inter_channels = in_channels // 2
if self.inter_channels == 0:
self.inter_channels = 1
if dimension == 3:
conv_nd = nn.Conv3d
max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
bn = nn.GroupNorm # (32, hidden_dim) #nn.BatchNorm3d
elif dimension == 2:
conv_nd = nn.Conv2d
max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
bn = nn.GroupNorm # (32, hidden_dim)nn.BatchNorm2d
else:
conv_nd = nn.Conv1d
max_pool_layer = nn.MaxPool1d(kernel_size=2)
bn = nn.GroupNorm # (32, hidden_dim)nn.BatchNorm1d
self.g = conv_nd(
in_channels=self.in_channels,
out_channels=self.inter_channels,
kernel_size=1,
stride=1,
padding=0,
)
if bn_layer:
self.W = nn.Sequential(
conv_nd(
in_channels=self.inter_channels,
out_channels=self.in_channels,
kernel_size=1,
stride=1,
padding=0,
),
bn(32, self.in_channels),
)
nn.init.constant_(self.W[1].weight, 0)
nn.init.constant_(self.W[1].bias, 0)
else:
self.W = conv_nd(
in_channels=self.inter_channels,
out_channels=self.in_channels,
kernel_size=1,
stride=1,
padding=0,
)
nn.init.constant_(self.W.weight, 0)
nn.init.constant_(self.W.bias, 0)
self.theta = conv_nd(
in_channels=self.in_channels,
out_channels=self.inter_channels,
kernel_size=1,
stride=1,
padding=0,
)
self.phi = conv_nd(
in_channels=self.in_channels,
out_channels=self.inter_channels,
kernel_size=1,
stride=1,
padding=0,
)
if sub_sample:
self.g = nn.Sequential(self.g, max_pool_layer)
self.phi = nn.Sequential(self.phi, max_pool_layer)
def forward(self, x):
"""
:param x: (b, c, t, h, w)
:return:
"""
batch_size = x.size(0)
g_x = self.g(x).view(batch_size, self.inter_channels, -1)
g_x = g_x.permute(0, 2, 1)
theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
theta_x = theta_x.permute(0, 2, 1)
phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
f = torch.matmul(theta_x, phi_x)
f_div_C = F.softmax(f, dim=-1)
y = torch.matmul(f_div_C, g_x)
y = y.permute(0, 2, 1).contiguous()
y = y.view(batch_size, self.inter_channels, *x.size()[2:])
W_y = self.W(y)
z = W_y + x
return z
class NONLocalBlock2D(_NonLocalBlockND):
def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
super(NONLocalBlock2D, self).__init__(
in_channels,
inter_channels=inter_channels,
dimension=2,
sub_sample=sub_sample,
bn_layer=bn_layer,
)
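# Shape note (illustrative): the non-local block is residual (z = W_y + x), so its
# output has exactly the same (N, C, H, W) shape as its input.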
@ROI_DENSEPOSE_HEAD_REGISTRY.register()
class DensePoseV1ConvXHead(nn.Module):
def __init__(self, cfg, input_channels):
super(DensePoseV1ConvXHead, self).__init__()
# fmt: off
hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
# fmt: on
pad_size = kernel_size // 2
n_channels = input_channels
for i in range(self.n_stacked_convs):
layer = Conv2d(n_channels, hidden_dim, kernel_size, stride=1, padding=pad_size)
layer_name = self._get_layer_name(i)
self.add_module(layer_name, layer)
n_channels = hidden_dim
self.n_out_channels = n_channels
initialize_module_params(self)
def forward(self, features):
x = features
output = x
for i in range(self.n_stacked_convs):
layer_name = self._get_layer_name(i)
x = getattr(self, layer_name)(x)
x = F.relu(x)
output = x
return output
def _get_layer_name(self, i):
layer_name = "body_conv_fcn{}".format(i + 1)
return layer_name
class DensePosePredictor(nn.Module):
def __init__(self, cfg, input_channels):
super(DensePosePredictor, self).__init__()
dim_in = input_channels
n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
self.ann_index_lowres = ConvTranspose2d(
dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.index_uv_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.u_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.v_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE
self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
self._initialize_confidence_estimation_layers(cfg, self.confidence_model_cfg, dim_in)
initialize_module_params(self)
def forward(self, head_outputs):
ann_index_lowres = self.ann_index_lowres(head_outputs)
index_uv_lowres = self.index_uv_lowres(head_outputs)
u_lowres = self.u_lowres(head_outputs)
v_lowres = self.v_lowres(head_outputs)
def interp2d(input):
return interpolate(
input, scale_factor=self.scale_factor, mode="bilinear", align_corners=False
)
ann_index = interp2d(ann_index_lowres)
index_uv = interp2d(index_uv_lowres)
u = interp2d(u_lowres)
v = interp2d(v_lowres)
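        # Note: predictions are upsampled twice - the transposed convolutions above double
        # the spatial size, and interp2d applies a further bilinear upscale by
        # cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE.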
(
(sigma_1, sigma_2, kappa_u, kappa_v, fine_segm_confidence, coarse_segm_confidence),
(
sigma_1_lowres,
sigma_2_lowres,
kappa_u_lowres,
kappa_v_lowres,
fine_segm_confidence_lowres,
coarse_segm_confidence_lowres,
),
(ann_index, index_uv),
) = self._forward_confidence_estimation_layers(
self.confidence_model_cfg, head_outputs, interp2d, ann_index, index_uv
)
return (
(ann_index, index_uv, u, v),
(ann_index_lowres, index_uv_lowres, u_lowres, v_lowres),
(sigma_1, sigma_2, kappa_u, kappa_v, fine_segm_confidence, coarse_segm_confidence),
(
sigma_1_lowres,
sigma_2_lowres,
kappa_u_lowres,
kappa_v_lowres,
fine_segm_confidence_lowres,
coarse_segm_confidence_lowres,
),
)
def _initialize_confidence_estimation_layers(
self, cfg: CfgNode, confidence_model_cfg: DensePoseConfidenceModelConfig, dim_in: int
):
dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
if confidence_model_cfg.uv_confidence.enabled:
if confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
self.sigma_2_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
elif confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO:
self.sigma_2_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.kappa_u_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.kappa_v_lowres = ConvTranspose2d(
dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
else:
raise ValueError(
f"Unknown confidence model type: {confidence_model_cfg.confidence_model_type}"
)
if confidence_model_cfg.segm_confidence.enabled:
self.fine_segm_confidence_lowres = ConvTranspose2d(
dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
self.coarse_segm_confidence_lowres = ConvTranspose2d(
dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
)
def _forward_confidence_estimation_layers(
self, confidence_model_cfg, head_outputs, interp2d, ann_index, index_uv
):
sigma_1, sigma_2, kappa_u, kappa_v = None, None, None, None
sigma_1_lowres, sigma_2_lowres, kappa_u_lowres, kappa_v_lowres = None, None, None, None
fine_segm_confidence_lowres, fine_segm_confidence = None, None
coarse_segm_confidence_lowres, coarse_segm_confidence = None, None
if confidence_model_cfg.uv_confidence.enabled:
if confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
sigma_2_lowres = self.sigma_2_lowres(head_outputs)
sigma_2 = interp2d(sigma_2_lowres)
elif confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO:
sigma_2_lowres = self.sigma_2_lowres(head_outputs)
kappa_u_lowres = self.kappa_u_lowres(head_outputs)
kappa_v_lowres = self.kappa_v_lowres(head_outputs)
sigma_2 = interp2d(sigma_2_lowres)
kappa_u = interp2d(kappa_u_lowres)
kappa_v = interp2d(kappa_v_lowres)
else:
raise ValueError(
f"Unknown confidence model type: {confidence_model_cfg.confidence_model_type}"
)
if confidence_model_cfg.segm_confidence.enabled:
fine_segm_confidence_lowres = self.fine_segm_confidence_lowres(head_outputs)
fine_segm_confidence = interp2d(fine_segm_confidence_lowres)
fine_segm_confidence = (
F.softplus(fine_segm_confidence) + confidence_model_cfg.segm_confidence.epsilon
)
index_uv = index_uv * torch.repeat_interleave(
fine_segm_confidence, index_uv.shape[1], dim=1
)
coarse_segm_confidence_lowres = self.coarse_segm_confidence_lowres(head_outputs)
coarse_segm_confidence = interp2d(coarse_segm_confidence_lowres)
coarse_segm_confidence = (
F.softplus(coarse_segm_confidence) + confidence_model_cfg.segm_confidence.epsilon
)
ann_index = ann_index * torch.repeat_interleave(
coarse_segm_confidence, ann_index.shape[1], dim=1
)
return (
(sigma_1, sigma_2, kappa_u, kappa_v, fine_segm_confidence, coarse_segm_confidence),
(
sigma_1_lowres,
sigma_2_lowres,
kappa_u_lowres,
kappa_v_lowres,
fine_segm_confidence_lowres,
coarse_segm_confidence_lowres,
),
(ann_index, index_uv),
)
class DensePoseDataFilter(object):
def __init__(self, cfg):
self.iou_threshold = cfg.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD
self.keep_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
@torch.no_grad()
def __call__(self, features: List[torch.Tensor], proposals_with_targets: List[Instances]):
"""
Filters proposals with targets to keep only the ones relevant for
DensePose training
Args:
features (list[Tensor]): input data as a list of features,
each feature is a tensor. Axis 0 represents the number of
images `N` in the input data; axes 1-3 are channels,
height, and width, which may vary between features
(e.g., if a feature pyramid is used).
proposals_with_targets (list[Instances]): length `N` list of
`Instances`. The i-th `Instances` contains instances
(proposals, GT) for the i-th input image,
"""
proposals_filtered = []
# TODO: the commented out code was supposed to correctly deal with situations
# where no valid DensePose GT is available for certain images. The corresponding
# image features were sliced and proposals were filtered. This led to performance
# deterioration, both in terms of runtime and in terms of evaluation results.
#
# feature_mask = torch.ones(
# len(proposals_with_targets),
# dtype=torch.bool,
# device=features[0].device if len(features) > 0 else torch.device("cpu"),
# )
for i, proposals_per_image in enumerate(proposals_with_targets):
if not proposals_per_image.has("gt_densepose") and (
not proposals_per_image.has("gt_masks") or not self.keep_masks
):
# feature_mask[i] = 0
continue
gt_boxes = proposals_per_image.gt_boxes
est_boxes = proposals_per_image.proposal_boxes
# apply match threshold for densepose head
iou = matched_boxlist_iou(gt_boxes, est_boxes)
iou_select = iou > self.iou_threshold
proposals_per_image = proposals_per_image[iou_select]
N_gt_boxes = len(proposals_per_image.gt_boxes)
assert N_gt_boxes == len(proposals_per_image.proposal_boxes), (
f"The number of GT boxes {N_gt_boxes} is different from the "
f"number of proposal boxes {len(proposals_per_image.proposal_boxes)}"
)
# filter out any target without suitable annotation
if self.keep_masks:
gt_masks = (
proposals_per_image.gt_masks
if hasattr(proposals_per_image, "gt_masks")
else [None] * N_gt_boxes
)
else:
gt_masks = [None] * N_gt_boxes
gt_densepose = (
proposals_per_image.gt_densepose
if hasattr(proposals_per_image, "gt_densepose")
else [None] * N_gt_boxes
)
assert len(gt_masks) == N_gt_boxes
assert len(gt_densepose) == N_gt_boxes
selected_indices = [
i
for i, (dp_target, mask_target) in enumerate(zip(gt_densepose, gt_masks))
if (dp_target is not None) or (mask_target is not None)
]
# if not len(selected_indices):
# feature_mask[i] = 0
# continue
if len(selected_indices) != N_gt_boxes:
proposals_per_image = proposals_per_image[selected_indices]
assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes)
proposals_filtered.append(proposals_per_image)
# features_filtered = [feature[feature_mask] for feature in features]
# return features_filtered, proposals_filtered
return features, proposals_filtered
def build_densepose_head(cfg, input_channels):
head_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME
return ROI_DENSEPOSE_HEAD_REGISTRY.get(head_name)(cfg, input_channels)
def build_densepose_predictor(cfg, input_channels):
predictor = DensePosePredictor(cfg, input_channels)
return predictor
def build_densepose_data_filter(cfg):
dp_filter = DensePoseDataFilter(cfg)
return dp_filter
def densepose_inference(
densepose_outputs: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
densepose_confidences: Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor],
detections: List[Instances],
):
"""
Infer dense pose estimate based on outputs from the DensePose head
and detections. The estimate for each detection instance is stored in its
"pred_densepose" attribute.
Args:
densepose_outputs (tuple(`torch.Tensor`)): iterable containing 4 elements:
- s (:obj: `torch.Tensor`): coarse segmentation tensor of size (N, A, H, W),
- i (:obj: `torch.Tensor`): fine segmentation tensor of size (N, C, H, W),
- u (:obj: `torch.Tensor`): U coordinates for each class of size (N, C, H, W),
- v (:obj: `torch.Tensor`): V coordinates for each class of size (N, C, H, W),
where N is the total number of detections in a batch,
A is the number of coarse segmentations labels
(e.g. 15 for coarse body parts + background),
C is the number of fine segmentation labels
(e.g. 25 for fine body parts + background),
W is the resolution along the X axis
H is the resolution along the Y axis
densepose_confidences (tuple(`torch.Tensor`)): iterable containing 4 elements:
- sigma_1 (:obj: `torch.Tensor`): global confidences for UV coordinates
of size (N, C, H, W)
- sigma_2 (:obj: `torch.Tensor`): individual confidences for UV coordinates
of size (N, C, H, W)
- kappa_u (:obj: `torch.Tensor`): first component of confidence direction
vector of size (N, C, H, W)
- kappa_v (:obj: `torch.Tensor`): second component of confidence direction
vector of size (N, C, H, W)
- fine_segm_confidence (:obj: `torch.Tensor`): confidence for fine
segmentation of size (N, 1, H, W)
- coarse_segm_confidence (:obj: `torch.Tensor`): confidence for coarse
segmentation of size (N, 1, H, W)
detections (list[Instances]): A list of N Instances, where N is the number of images
in the batch. Instances are modified by this method: "pred_densepose" attribute
is added to each instance, the attribute contains the corresponding
DensePoseOutput object.
"""
# DensePose outputs: segmentation, body part indices, U, V
s, index_uv, u, v = densepose_outputs
(
sigma_1,
sigma_2,
kappa_u,
kappa_v,
fine_segm_confidence,
coarse_segm_confidence,
) = densepose_confidences
k = 0
for detection in detections:
n_i = len(detection)
s_i = s[k : k + n_i]
index_uv_i = index_uv[k : k + n_i]
u_i = u[k : k + n_i]
v_i = v[k : k + n_i]
_local_vars = locals()
confidences = {
name: _local_vars[name][k : k + n_i]
for name in (
"sigma_1",
"sigma_2",
"kappa_u",
"kappa_v",
"fine_segm_confidence",
"coarse_segm_confidence",
)
if _local_vars.get(name) is not None
}
densepose_output_i = DensePoseOutput(s_i, index_uv_i, u_i, v_i, confidences)
detection.pred_densepose = densepose_output_i
k += n_i
def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z):
"""
Computes utility values for linear interpolation at points v.
The points are given as normalized offsets in the source interval
(v0_src, v0_src + size_src), more precisely:
v = v0_src + v_norm * size_src / 256.0
The computed utilities include lower points v_lo, upper points v_hi,
interpolation weights v_w and flags j_valid indicating whether the
points falls into the destination interval (v0_dst, v0_dst + size_dst).
Args:
v_norm (:obj: `torch.Tensor`): tensor of size N containing
normalized point offsets
v0_src (:obj: `torch.Tensor`): tensor of size N containing
left bounds of source intervals for normalized points
size_src (:obj: `torch.Tensor`): tensor of size N containing
source interval sizes for normalized points
v0_dst (:obj: `torch.Tensor`): tensor of size N containing
left bounds of destination intervals
size_dst (:obj: `torch.Tensor`): tensor of size N containing
destination interval sizes
size_z (int): interval size for data to be interpolated
Returns:
v_lo (:obj: `torch.Tensor`): int tensor of size N containing
indices of lower values used for interpolation, all values are
integers from [0, size_z - 1]
v_hi (:obj: `torch.Tensor`): int tensor of size N containing
indices of upper values used for interpolation, all values are
integers from [0, size_z - 1]
v_w (:obj: `torch.Tensor`): float tensor of size N containing
interpolation weights
j_valid (:obj: `torch.Tensor`): uint8 tensor of size N containing
0 for points outside the estimation interval
(v0_est, v0_est + size_est) and 1 otherwise
"""
v = v0_src + v_norm * size_src / 256.0
j_valid = (v - v0_dst >= 0) * (v - v0_dst < size_dst)
v_grid = (v - v0_dst) * size_z / size_dst
v_lo = v_grid.floor().long().clamp(min=0, max=size_z - 1)
v_hi = (v_lo + 1).clamp(max=size_z - 1)
v_grid = torch.min(v_hi.float(), v_grid)
v_w = v_grid - v_lo.float()
return v_lo, v_hi, v_w, j_valid
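# Worked example (editor's note): v_norm=128, v0_src=10.0, size_src=20.0,
# v0_dst=10.0, size_dst=20.0, size_z=4 gives v = 10 + 128*20/256 = 20.0,
# j_valid = True (0 <= 20-10 < 20), v_grid = 10*4/20 = 2.0, so v_lo = 2,
# v_hi = 3 and v_w = 0.0.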
def _grid_sampling_utilities(
zh, zw, bbox_xywh_est, bbox_xywh_gt, index_gt, x_norm, y_norm, index_bbox
):
"""
Prepare tensors used in grid sampling.
Args:
z_est (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with estimated
values of Z to be extracted for the points X, Y and channel
indices I
bbox_xywh_est (:obj: `torch.Tensor`): tensor of size (N, 4) containing
estimated bounding boxes in format XYWH
bbox_xywh_gt (:obj: `torch.Tensor`): tensor of size (N, 4) containing
matched ground truth bounding boxes in format XYWH
index_gt (:obj: `torch.Tensor`): tensor of size K with point labels for
ground truth points
x_norm (:obj: `torch.Tensor`): tensor of size K with X normalized
coordinates of ground truth points. Image X coordinates can be
obtained as X = Xbbox + x_norm * Wbbox / 255
y_norm (:obj: `torch.Tensor`): tensor of size K with Y normalized
coordinates of ground truth points. Image Y coordinates can be
obtained as Y = Ybbox + y_norm * Hbbox / 255
index_bbox (:obj: `torch.Tensor`): tensor of size K with bounding box
indices for each ground truth point. The values are thus in
[0, N-1]
Returns:
j_valid (:obj: `torch.Tensor`): uint8 tensor of size M containing
0 for points to be discarded and 1 for points to be selected
y_lo (:obj: `torch.Tensor`): int tensor of indices of upper values
in z_est for each point
y_hi (:obj: `torch.Tensor`): int tensor of indices of lower values
in z_est for each point
x_lo (:obj: `torch.Tensor`): int tensor of indices of left values
in z_est for each point
x_hi (:obj: `torch.Tensor`): int tensor of indices of right values
in z_est for each point
w_ylo_xlo (:obj: `torch.Tensor`): float tensor of size M;
contains upper-left value weight for each point
w_ylo_xhi (:obj: `torch.Tensor`): float tensor of size M;
contains upper-right value weight for each point
w_yhi_xlo (:obj: `torch.Tensor`): float tensor of size M;
contains lower-left value weight for each point
w_yhi_xhi (:obj: `torch.Tensor`): float tensor of size M;
contains lower-right value weight for each point
"""
x0_gt, y0_gt, w_gt, h_gt = bbox_xywh_gt[index_bbox].unbind(dim=1)
x0_est, y0_est, w_est, h_est = bbox_xywh_est[index_bbox].unbind(dim=1)
x_lo, x_hi, x_w, jx_valid = _linear_interpolation_utilities(
x_norm, x0_gt, w_gt, x0_est, w_est, zw
)
y_lo, y_hi, y_w, jy_valid = _linear_interpolation_utilities(
y_norm, y0_gt, h_gt, y0_est, h_est, zh
)
j_valid = jx_valid * jy_valid
w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w)
w_ylo_xhi = x_w * (1.0 - y_w)
w_yhi_xlo = (1.0 - x_w) * y_w
w_yhi_xhi = x_w * y_w
return j_valid, y_lo, y_hi, x_lo, x_hi, w_ylo_xlo, w_ylo_xhi, w_yhi_xlo, w_yhi_xhi
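# Note: the four bilinear weights returned above sum to 1 for any x_w, y_w in [0, 1].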
def _extract_at_points_packed(
z_est,
index_bbox_valid,
slice_index_uv,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo,
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
):
"""
Extract ground truth values z_gt for valid point indices and estimated
values z_est using bilinear interpolation over top-left (y_lo, x_lo),
top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right
(y_hi, x_hi) values in z_est with corresponding weights:
w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi.
Use slice_index_uv to slice dim=1 in z_est
"""
z_est_sampled = (
z_est[index_bbox_valid, slice_index_uv, y_lo, x_lo] * w_ylo_xlo
+ z_est[index_bbox_valid, slice_index_uv, y_lo, x_hi] * w_ylo_xhi
+ z_est[index_bbox_valid, slice_index_uv, y_hi, x_lo] * w_yhi_xlo
+ z_est[index_bbox_valid, slice_index_uv, y_hi, x_hi] * w_yhi_xhi
)
return z_est_sampled
def _resample_data(
z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode="nearest", padding_mode="zeros"
):
"""
Args:
z (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with data to be
resampled
bbox_xywh_src (:obj: `torch.Tensor`): tensor of size (N,4) containing
source bounding boxes in format XYWH
bbox_xywh_dst (:obj: `torch.Tensor`): tensor of size (N,4) containing
destination bounding boxes in format XYWH
Return:
zresampled (:obj: `torch.Tensor`): tensor of size (N, C, Hout, Wout)
with resampled values of z, where D is the discretization size
"""
n = bbox_xywh_src.size(0)
assert n == bbox_xywh_dst.size(0), (
"The number of "
"source ROIs for resampling ({}) should be equal to the number "
"of destination ROIs ({})".format(bbox_xywh_src.size(0), bbox_xywh_dst.size(0))
)
x0src, y0src, wsrc, hsrc = bbox_xywh_src.unbind(dim=1)
x0dst, y0dst, wdst, hdst = bbox_xywh_dst.unbind(dim=1)
x0dst_norm = 2 * (x0dst - x0src) / wsrc - 1
y0dst_norm = 2 * (y0dst - y0src) / hsrc - 1
x1dst_norm = 2 * (x0dst + wdst - x0src) / wsrc - 1
y1dst_norm = 2 * (y0dst + hdst - y0src) / hsrc - 1
grid_w = torch.arange(wout, device=z.device, dtype=torch.float) / wout
grid_h = torch.arange(hout, device=z.device, dtype=torch.float) / hout
grid_w_expanded = grid_w[None, None, :].expand(n, hout, wout)
grid_h_expanded = grid_h[None, :, None].expand(n, hout, wout)
dx_expanded = (x1dst_norm - x0dst_norm)[:, None, None].expand(n, hout, wout)
dy_expanded = (y1dst_norm - y0dst_norm)[:, None, None].expand(n, hout, wout)
x0_expanded = x0dst_norm[:, None, None].expand(n, hout, wout)
y0_expanded = y0dst_norm[:, None, None].expand(n, hout, wout)
grid_x = grid_w_expanded * dx_expanded + x0_expanded
grid_y = grid_h_expanded * dy_expanded + y0_expanded
grid = torch.stack((grid_x, grid_y), dim=3)
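# grid_x/grid_y above are in the normalized [-1, 1] range expected by F.grid_sample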
# resample Z from (N, C, H, W) into (N, C, Hout, Wout)
zresampled = F.grid_sample(z, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
return zresampled
def _extract_single_tensors_from_matches_one_image(
proposals_targets, bbox_with_dp_offset, bbox_global_offset
):
i_gt_all = []
x_norm_all = []
y_norm_all = []
u_gt_all = []
v_gt_all = []
s_gt_all = []
bbox_xywh_gt_all = []
bbox_xywh_est_all = []
# Ibbox_all == k should be true for all data that corresponds
# to bbox_xywh_gt[k] and bbox_xywh_est[k]
# index k here is global wrt images
i_bbox_all = []
# at offset k (k is global) contains index of bounding box data
# within densepose output tensor
i_with_dp = []
boxes_xywh_est = proposals_targets.proposal_boxes.clone()
boxes_xywh_gt = proposals_targets.gt_boxes.clone()
n_i = len(boxes_xywh_est)
assert n_i == len(boxes_xywh_gt)
if n_i:
boxes_xywh_est.tensor[:, 2] -= boxes_xywh_est.tensor[:, 0]
boxes_xywh_est.tensor[:, 3] -= boxes_xywh_est.tensor[:, 1]
boxes_xywh_gt.tensor[:, 2] -= boxes_xywh_gt.tensor[:, 0]
boxes_xywh_gt.tensor[:, 3] -= boxes_xywh_gt.tensor[:, 1]
if hasattr(proposals_targets, "gt_densepose"):
densepose_gt = proposals_targets.gt_densepose
for k, box_xywh_est, box_xywh_gt, dp_gt in zip(
range(n_i), boxes_xywh_est.tensor, boxes_xywh_gt.tensor, densepose_gt
):
if (dp_gt is not None) and (len(dp_gt.x) > 0):
i_gt_all.append(dp_gt.i)
x_norm_all.append(dp_gt.x)
y_norm_all.append(dp_gt.y)
u_gt_all.append(dp_gt.u)
v_gt_all.append(dp_gt.v)
s_gt_all.append(dp_gt.segm.unsqueeze(0))
bbox_xywh_gt_all.append(box_xywh_gt.view(-1, 4))
bbox_xywh_est_all.append(box_xywh_est.view(-1, 4))
i_bbox_k = torch.full_like(dp_gt.i, bbox_with_dp_offset + len(i_with_dp))
i_bbox_all.append(i_bbox_k)
i_with_dp.append(bbox_global_offset + k)
return (
i_gt_all,
x_norm_all,
y_norm_all,
u_gt_all,
v_gt_all,
s_gt_all,
bbox_xywh_gt_all,
bbox_xywh_est_all,
i_bbox_all,
i_with_dp,
)
def _extract_single_tensors_from_matches(proposals_with_targets):
i_img = []
i_gt_all = []
x_norm_all = []
y_norm_all = []
u_gt_all = []
v_gt_all = []
s_gt_all = []
bbox_xywh_gt_all = []
bbox_xywh_est_all = []
i_bbox_all = []
i_with_dp_all = []
n = 0
for i, proposals_targets_per_image in enumerate(proposals_with_targets):
n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0)
if not n_i:
continue
(
i_gt_img,
x_norm_img,
y_norm_img,
u_gt_img,
v_gt_img,
s_gt_img,
bbox_xywh_gt_img,
bbox_xywh_est_img,
i_bbox_img,
i_with_dp_img,
) = _extract_single_tensors_from_matches_one_image( # noqa
proposals_targets_per_image, len(i_with_dp_all), n
)
i_gt_all.extend(i_gt_img)
x_norm_all.extend(x_norm_img)
y_norm_all.extend(y_norm_img)
u_gt_all.extend(u_gt_img)
v_gt_all.extend(v_gt_img)
s_gt_all.extend(s_gt_img)
bbox_xywh_gt_all.extend(bbox_xywh_gt_img)
bbox_xywh_est_all.extend(bbox_xywh_est_img)
i_bbox_all.extend(i_bbox_img)
i_with_dp_all.extend(i_with_dp_img)
i_img.extend([i] * len(i_with_dp_img))
n += n_i
# concatenate all data into a single tensor
if (n > 0) and (len(i_with_dp_all) > 0):
i_gt = torch.cat(i_gt_all, 0).long()
x_norm = torch.cat(x_norm_all, 0)
y_norm = torch.cat(y_norm_all, 0)
u_gt = torch.cat(u_gt_all, 0)
v_gt = torch.cat(v_gt_all, 0)
s_gt = torch.cat(s_gt_all, 0)
bbox_xywh_gt = torch.cat(bbox_xywh_gt_all, 0)
bbox_xywh_est = torch.cat(bbox_xywh_est_all, 0)
i_bbox = torch.cat(i_bbox_all, 0).long()
else:
i_gt = None
x_norm = None
y_norm = None
u_gt = None
v_gt = None
s_gt = None
bbox_xywh_gt = None
bbox_xywh_est = None
i_bbox = None
return (
i_img,
i_with_dp_all,
bbox_xywh_est,
bbox_xywh_gt,
i_gt,
x_norm,
y_norm,
u_gt,
v_gt,
s_gt,
i_bbox,
)
@dataclass
class DataForMaskLoss:
"""
Contains mask GT and estimated data for proposals from multiple images:
"""
# tensor of size (K, H, W) containing GT labels
masks_gt: Optional[torch.Tensor] = None
# tensor of size (K, C, H, W) containing estimated scores
masks_est: Optional[torch.Tensor] = None
def _extract_data_for_mask_loss_from_matches(
proposals_targets: Iterable[Instances], estimated_segm: torch.Tensor
) -> DataForMaskLoss:
"""
Extract data for mask loss from instances that contain matched GT and
estimated bounding boxes.
Args:
proposals_targets: Iterable[Instances]
matched GT and estimated results, each item in the iterable
corresponds to data in 1 image
estimated_segm: torch.Tensor if size
size to which GT masks are resized
Return:
masks_est: tensor(K, C, H, W) of float - class scores
masks_gt: tensor(K, H, W) of int64 - labels
"""
data = DataForMaskLoss()
masks_gt = []
offset = 0
assert estimated_segm.shape[2] == estimated_segm.shape[3], (
f"Expected estimated segmentation to have a square shape, "
f"but the actual shape is {estimated_segm.shape[2:]}"
)
mask_size = estimated_segm.shape[2]
num_proposals = sum(inst.proposal_boxes.tensor.size(0) for inst in proposals_targets)
num_estimated = estimated_segm.shape[0]
assert (
num_proposals == num_estimated
), "The number of proposals {} must be equal to the number of estimates {}".format(
num_proposals, num_estimated
)
for proposals_targets_per_image in proposals_targets:
n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0)
if not n_i:
continue
gt_masks_per_image = proposals_targets_per_image.gt_masks.crop_and_resize(
proposals_targets_per_image.proposal_boxes.tensor, mask_size
).to(device=estimated_segm.device)
masks_gt.append(gt_masks_per_image)
offset += n_i
if masks_gt:
data.masks_est = estimated_segm
data.masks_gt = torch.cat(masks_gt, dim=0)
return data
class IIDIsotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of iid residuals with isotropic covariance:
$Sigma_i = sigma_i^2 I$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: float):
super(IIDIsotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(
self,
u: torch.Tensor,
v: torch.Tensor,
sigma_u: torch.Tensor,
target_u: torch.Tensor,
target_v: torch.Tensor,
):
# compute $\sigma_i^2$
# use sigma_lower_bound to avoid degenerate solution for variance
# (sigma -> 0)
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
# compute \|delta_i\|^2
delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2
# the total loss from the formula above:
loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2)
return loss.sum()
class IndepAnisotropicGaussianUVLoss(nn.Module):
"""
Loss for the case of independent residuals with anisotropic covariances:
$Sigma_i = sigma_i^2 I + r_i r_i^T$
The loss (negative log likelihood) is then:
$1/2 sum_{i=1}^n (log(2 pi)
+ log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ ||delta_i||^2 / sigma_i^2
- <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
difference between estimated and ground truth UV values
For details, see:
N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
"""
def __init__(self, sigma_lower_bound: float):
super(IndepAnisotropicGaussianUVLoss, self).__init__()
self.sigma_lower_bound = sigma_lower_bound
self.log2pi = math.log(2 * math.pi)
def forward(
self,
u: torch.Tensor,
v: torch.Tensor,
sigma_u: torch.Tensor,
kappa_u_est: torch.Tensor,
kappa_v_est: torch.Tensor,
target_u: torch.Tensor,
target_v: torch.Tensor,
):
# compute $\sigma_i^2$
sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
# compute \|r_i\|^2
r_sqnorm2 = kappa_u_est ** 2 + kappa_v_est ** 2
delta_u = u - target_u
delta_v = v - target_v
# compute \|delta_i\|^2
delta_sqnorm = delta_u ** 2 + delta_v ** 2
delta_u_r_u = delta_u * kappa_u_est
delta_v_r_v = delta_v * kappa_v_est
# compute the scalar product <delta_i, r_i>
delta_r = delta_u_r_u + delta_v_r_v
# compute squared scalar product <delta_i, r_i>^2
delta_r_sqnorm = delta_r ** 2
denom2 = sigma2 * (sigma2 + r_sqnorm2)
loss = 0.5 * (
self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2
)
return loss.sum()
class DensePoseLosses(object):
def __init__(self, cfg):
# fmt: off
self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
self.w_points = cfg.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS
self.w_part = cfg.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS
self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS
self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
# fmt: on
self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss(
self.confidence_model_cfg.uv_confidence.epsilon
)
elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO:
self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss(
self.confidence_model_cfg.uv_confidence.epsilon
)
def __call__(self, proposals_with_gt, densepose_outputs, densepose_confidences):
if not self.segm_trained_by_masks:
return self.produce_densepose_losses(
proposals_with_gt, densepose_outputs, densepose_confidences
)
else:
losses = {}
losses_densepose = self.produce_densepose_losses(
proposals_with_gt, densepose_outputs, densepose_confidences
)
losses.update(losses_densepose)
losses_mask = self.produce_mask_losses(
proposals_with_gt, densepose_outputs, densepose_confidences
)
losses.update(losses_mask)
return losses
def produce_fake_mask_losses(self, densepose_outputs):
losses = {}
segm_scores, _, _, _ = densepose_outputs
losses["loss_densepose_S"] = segm_scores.sum() * 0
return losses
def produce_mask_losses(self, proposals_with_gt, densepose_outputs, densepose_confidences):
if not len(proposals_with_gt):
return self.produce_fake_mask_losses(densepose_outputs)
losses = {}
# densepose outputs are computed for all images and all bounding boxes;
# i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively,
# the outputs will have size(0) == 3+1+2+1 == 7
segm_scores, _, _, _ = densepose_outputs
with torch.no_grad():
mask_loss_data = _extract_data_for_mask_loss_from_matches(
proposals_with_gt, segm_scores
)
if (mask_loss_data.masks_gt is None) or (mask_loss_data.masks_est is None):
return self.produce_fake_mask_losses(densepose_outputs)
losses["loss_densepose_S"] = (
F.cross_entropy(mask_loss_data.masks_est, mask_loss_data.masks_gt.long()) * self.w_segm
)
return losses
def produce_fake_densepose_losses(self, densepose_outputs, densepose_confidences):
# we need to keep the same computation graph on all the GPUs to
# perform reduction properly. Hence even if we have no data on one
# of the GPUs, we still need to generate the computation graph.
# Add fake (zero) losses in the form Tensor.sum() * 0
s, index_uv, u, v = densepose_outputs
conf_type = self.confidence_model_cfg.uv_confidence.type
(
sigma_1,
sigma_2,
kappa_u,
kappa_v,
fine_segm_confidence,
coarse_segm_confidence,
) = densepose_confidences
losses = {}
losses["loss_densepose_I"] = index_uv.sum() * 0
if not self.segm_trained_by_masks:
losses["loss_densepose_S"] = s.sum() * 0
if self.confidence_model_cfg.uv_confidence.enabled:
losses["loss_densepose_UV"] = (u.sum() + v.sum()) * 0
if conf_type == DensePoseUVConfidenceType.IID_ISO:
losses["loss_densepose_UV"] += sigma_2.sum() * 0
elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO:
losses["loss_densepose_UV"] += (sigma_2.sum() + kappa_u.sum() + kappa_v.sum()) * 0
else:
losses["loss_densepose_U"] = u.sum() * 0
losses["loss_densepose_V"] = v.sum() * 0
return losses
def produce_densepose_losses(self, proposals_with_gt, densepose_outputs, densepose_confidences):
losses = {}
# densepose outputs are computed for all images and all bounding boxes;
# i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively,
# the outputs will have size(0) == 3+1+2+1 == 7
s, index_uv, u, v = densepose_outputs
if not len(proposals_with_gt):
return self.produce_fake_densepose_losses(densepose_outputs, densepose_confidences)
(
sigma_1,
sigma_2,
kappa_u,
kappa_v,
fine_segm_confidence,
coarse_segm_confidence,
) = densepose_confidences
conf_type = self.confidence_model_cfg.uv_confidence.type
assert u.size(2) == v.size(2)
assert u.size(3) == v.size(3)
assert u.size(2) == index_uv.size(2)
assert u.size(3) == index_uv.size(3)
with torch.no_grad():
(
index_uv_img,
i_with_dp,
bbox_xywh_est,
bbox_xywh_gt,
index_gt_all,
x_norm,
y_norm,
u_gt_all,
v_gt_all,
s_gt,
index_bbox,
) = _extract_single_tensors_from_matches( # noqa
proposals_with_gt
)
n_batch = len(i_with_dp)
# NOTE: we need to keep the same computation graph on all the GPUs to
# perform reduction properly. Hence even if we have no data on one
# of the GPUs, we still need to generate the computation graph.
# Add fake (zero) loss in the form Tensor.sum() * 0
if not n_batch:
return self.produce_fake_densepose_losses(densepose_outputs, densepose_confidences)
zh = u.size(2)
zw = u.size(3)
(
j_valid,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo,
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
) = _grid_sampling_utilities( # noqa
zh, zw, bbox_xywh_est, bbox_xywh_gt, index_gt_all, x_norm, y_norm, index_bbox
)
j_valid_fg = j_valid * (index_gt_all > 0)
u_gt = u_gt_all[j_valid_fg]
u_est_all = _extract_at_points_packed(
u[i_with_dp],
index_bbox,
index_gt_all,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo,
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
)
u_est = u_est_all[j_valid_fg]
v_gt = v_gt_all[j_valid_fg]
v_est_all = _extract_at_points_packed(
v[i_with_dp],
index_bbox,
index_gt_all,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo,
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
)
v_est = v_est_all[j_valid_fg]
index_uv_gt = index_gt_all[j_valid]
index_uv_est_all = _extract_at_points_packed(
index_uv[i_with_dp],
index_bbox,
slice(None),
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo[:, None],
w_ylo_xhi[:, None],
w_yhi_xlo[:, None],
w_yhi_xhi[:, None],
)
index_uv_est = index_uv_est_all[j_valid, :]
if self.confidence_model_cfg.uv_confidence.enabled:
sigma_2_est_all = _extract_at_points_packed(
sigma_2[i_with_dp],
index_bbox,
index_gt_all,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo,
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
)
sigma_2_est = sigma_2_est_all[j_valid_fg]
if conf_type in [DensePoseUVConfidenceType.INDEP_ANISO]:
kappa_u_est_all = _extract_at_points_packed(
kappa_u[i_with_dp],
index_bbox,
index_gt_all,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo,
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
)
kappa_u_est = kappa_u_est_all[j_valid_fg]
kappa_v_est_all = _extract_at_points_packed(
kappa_v[i_with_dp],
index_bbox,
index_gt_all,
y_lo,
y_hi,
x_lo,
x_hi,
w_ylo_xlo,
w_ylo_xhi,
w_yhi_xlo,
w_yhi_xhi,
)
kappa_v_est = kappa_v_est_all[j_valid_fg]
# Resample everything to the estimated data size, no need to resample
# S_est then:
if not self.segm_trained_by_masks:
s_est = s[i_with_dp]
with torch.no_grad():
s_gt = _resample_data(
s_gt.unsqueeze(1),
bbox_xywh_gt,
bbox_xywh_est,
self.heatmap_size,
self.heatmap_size,
mode="nearest",
padding_mode="zeros",
).squeeze(1)
# add point-based losses:
if self.confidence_model_cfg.uv_confidence.enabled:
if conf_type == DensePoseUVConfidenceType.IID_ISO:
uv_loss = (
self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt)
* self.w_points
)
losses["loss_densepose_UV"] = uv_loss
elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO:
uv_loss = (
self.uv_loss_with_confidences(
u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt
)
* self.w_points
)
losses["loss_densepose_UV"] = uv_loss
else:
raise ValueError(f"Unknown confidence model type: {conf_type}")
else:
u_loss = F.smooth_l1_loss(u_est, u_gt, reduction="sum") * self.w_points
losses["loss_densepose_U"] = u_loss
v_loss = F.smooth_l1_loss(v_est, v_gt, reduction="sum") * self.w_points
losses["loss_densepose_V"] = v_loss
index_uv_loss = F.cross_entropy(index_uv_est, index_uv_gt.long()) * self.w_part
losses["loss_densepose_I"] = index_uv_loss
if not self.segm_trained_by_masks:
if self.n_segm_chan == 2:
s_gt = s_gt > 0
s_loss = F.cross_entropy(s_est, s_gt.long()) * self.w_segm
losses["loss_densepose_S"] = s_loss
return losses
def build_densepose_losses(cfg):
losses = DensePoseLosses(cfg)
return losses
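# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original file): how the builders above are
# typically wired together in a DensePose ROI head. `cfg` is a detectron2
# CfgNode carrying the MODEL.ROI_DENSEPOSE_HEAD options referenced throughout;
# the variable names are illustrative only.
#
# head = build_densepose_head(cfg, input_channels)
# predictor = build_densepose_predictor(cfg, head.n_out_channels)
# losses = build_densepose_losses(cfg)
# outputs, _, confidences, _ = predictor(head(features_roi))
# loss_dict = losses(proposals_with_gt, outputs, confidences)   # training
# densepose_inference(outputs, confidences, detections)         # inference
# ---------------------------------------------------------------------------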
| [
"[email protected]"
] | |
1d26a049c57d17eed0c9ca0b3efec122a762a23c | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/apimanagement/v20201201/get_api_policy.py | 6924530cfd3623114e7744c89265ceeb2de1896e | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,113 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetApiPolicyResult',
'AwaitableGetApiPolicyResult',
'get_api_policy',
'get_api_policy_output',
]
@pulumi.output_type
class GetApiPolicyResult:
"""
Policy Contract details.
"""
def __init__(__self__, format=None, id=None, name=None, type=None, value=None):
if format and not isinstance(format, str):
raise TypeError("Expected argument 'format' to be a str")
pulumi.set(__self__, "format", format)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if value and not isinstance(value, str):
raise TypeError("Expected argument 'value' to be a str")
pulumi.set(__self__, "value", value)
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
Format of the policyContent.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type for API Management resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> str:
"""
Contents of the Policy as defined by the format.
"""
return pulumi.get(self, "value")
class AwaitableGetApiPolicyResult(GetApiPolicyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApiPolicyResult(
format=self.format,
id=self.id,
name=self.name,
type=self.type,
value=self.value)
def get_api_policy(api_id: Optional[str] = None,
format: Optional[str] = None,
policy_id: Optional[str] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApiPolicyResult:
"""
Policy Contract details.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str format: Policy Export Format.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
__args__ = dict()
__args__['apiId'] = api_id
__args__['format'] = format
__args__['policyId'] = policy_id
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20201201:getApiPolicy', __args__, opts=opts, typ=GetApiPolicyResult).value
return AwaitableGetApiPolicyResult(
format=__ret__.format,
id=__ret__.id,
name=__ret__.name,
type=__ret__.type,
value=__ret__.value)
@_utilities.lift_output_func(get_api_policy)
def get_api_policy_output(api_id: Optional[pulumi.Input[str]] = None,
format: Optional[pulumi.Input[Optional[str]]] = None,
policy_id: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetApiPolicyResult]:
"""
Policy Contract details.
:param str api_id: API revision identifier. Must be unique in the current API Management service instance. Non-current revision has ;rev=n as a suffix where n is the revision number.
:param str format: Policy Export Format.
:param str policy_id: The identifier of the Policy.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
"""
...
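# Editor's sketch (not part of the generated SDK): example invocation with
# placeholder resource names.
#
# policy = get_api_policy(api_id="echo-api",
#                         policy_id="policy",
#                         resource_group_name="my-resource-group",
#                         service_name="my-apim-service")
# pulumi.export("apiPolicyValue", policy.value)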
| [
"[email protected]"
] | |
7800d2c0d0a64a5bc8c602666596e3007524a6ca | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_199/3905.py | 40351dcbf8a07ac6656283972fb4be0fa055fb3c | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,041 | py | #!/usr/local/bin/python
import sys
def flip(s, n, i):
return tuple( not s[j] if i <= j and j < i + n else s[j] for j in range(len(s)) )
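# answer() performs a breadth-first search over pancake states (tuples of
# booleans, True = happy side up), so the first all-True state found gives the
# minimum number of width-n flips; unreachable targets yield 'IMPOSSIBLE'.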
def answer(initial, n):
l = len(initial)
maxflip = l - (n - 1)
worklist = [initial]
states = { initial: 0 }
while worklist:
state = worklist.pop(0)
flips = states[state]
#print state
if all(state):
return flips
for i in range(maxflip):
newstate = flip(state, n, i)
if newstate not in states:
states[newstate] = flips + 1
worklist.append(newstate)
return 'IMPOSSIBLE'
def solve():
with open(sys.argv[1]) as f:
f.readline()
i = 1
for line in f:
parts = line.strip().split(' ')
n = int(parts[1])
ps = tuple( c == '+' for c in parts[0] )
result = answer(ps, n)
print('Case #{}: {}'.format(i, result))
i = i + 1
solve()
| [
"[email protected]"
] | |
0c425e4fe95e0209d87b445dc116be63eb4ffdda | bc11e10521fa313d83011e77a2c31a0b6ed581af | /lib/rubyfox/server/data/lib/Lib/test/test_rfc822.py | 470b92878fe1a54f6ff113544a80e5f9235b3637 | [
"MIT"
] | permissive | neopoly/rubyfox-server | f6f191c68dcc30b8c56d22c8209e4a69251f4f27 | 26d67687fc642111ef8d02507f2b567828bd1ebd | refs/heads/master | 2023-07-20T15:04:32.028192 | 2023-07-17T09:16:36 | 2023-07-17T09:33:20 | 6,457,322 | 3 | 4 | MIT | 2020-08-11T06:53:50 | 2012-10-30T13:06:32 | Python | UTF-8 | Python | false | false | 7,518 | py | import rfc822
import sys
import test_support
import unittest
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class MessageTestCase(unittest.TestCase):
def create_message(self, msg):
return rfc822.Message(StringIO(msg))
def test_get(self):
msg = self.create_message(
'To: "last, first" <[email protected]>\n\ntest\n')
self.assert_(msg.get("to") == '"last, first" <[email protected]>')
self.assert_(msg.get("TO") == '"last, first" <[email protected]>')
self.assert_(msg.get("No-Such-Header") is None)
self.assert_(msg.get("No-Such-Header", "No-Such-Value")
== "No-Such-Value")
def test_setdefault(self):
msg = self.create_message(
'To: "last, first" <[email protected]>\n\ntest\n')
self.assert_(not msg.has_key("New-Header"))
self.assert_(msg.setdefault("New-Header", "New-Value") == "New-Value")
self.assert_(msg.setdefault("New-Header", "Different-Value")
== "New-Value")
self.assert_(msg["new-header"] == "New-Value")
self.assert_(msg.setdefault("Another-Header") == "")
self.assert_(msg["another-header"] == "")
def check(self, msg, results):
"""Check addresses and the date."""
m = self.create_message(msg)
i = 0
for n, a in m.getaddrlist('to') + m.getaddrlist('cc'):
try:
mn, ma = results[i][0], results[i][1]
except IndexError:
print 'extra parsed address:', repr(n), repr(a)
continue
i = i + 1
if mn == n and ma == a:
pass
else:
print 'not found:', repr(n), repr(a)
out = m.getdate('date')
if out:
self.assertEqual(out,
(1999, 1, 13, 23, 57, 35, 0, 0, 0),
"date conversion failed")
# Note: all test cases must have the same date (in various formats),
# or no date!
def test_basic(self):
self.check(
'Date: Wed, 13 Jan 1999 23:57:35 -0500\n'
'From: Guido van Rossum <[email protected]>\n'
'To: "Guido van\n'
'\t : Rossum" <[email protected]>\n'
'Subject: test2\n'
'\n'
'test2\n',
[('Guido van\n\t : Rossum', '[email protected]')])
self.check(
'From: Barry <[email protected]\n'
'To: [email protected] (Guido: the Barbarian)\n'
'Subject: nonsense\n'
'Date: Wednesday, January 13 1999 23:57:35 -0500\n'
'\n'
'test',
[('Guido: the Barbarian', '[email protected]')])
self.check(
'From: Barry <[email protected]\n'
'To: [email protected] (Guido: the Barbarian)\n'
'Cc: "Guido: the Madman" <[email protected]>\n'
'Date: 13-Jan-1999 23:57:35 EST\n'
'\n'
'test',
[('Guido: the Barbarian', '[email protected]'),
('Guido: the Madman', '[email protected]')
])
self.check(
'To: "The monster with\n'
' the very long name: Guido" <[email protected]>\n'
'Date: Wed, 13 Jan 1999 23:57:35 -0500\n'
'\n'
'test',
[('The monster with\n the very long name: Guido',
'[email protected]')])
self.check(
'To: "Amit J. Patel" <[email protected]>\n'
'CC: Mike Fletcher <[email protected]>,\n'
' "\'[email protected]\'" <[email protected]>\n'
'Cc: [email protected], [email protected]\n'
'Cc: [email protected]\n'
'Date: Wed, 13 Jan 1999 23:57:35 -0500\n'
'\n'
'test',
[('Amit J. Patel', '[email protected]'),
('Mike Fletcher', '[email protected]'),
("'[email protected]'", '[email protected]'),
('', '[email protected]'),
('', '[email protected]'),
('', '[email protected]'),
])
self.check(
'To: Some One <[email protected]>\n'
'From: Anudder Persin <[email protected]>\n'
'Date:\n'
'\n'
'test',
[('Some One', '[email protected]')])
self.check(
'To: [email protected] (User J. Person)\n\n',
[('User J. Person', '[email protected]')])
def test_twisted(self):
# This one is just twisted. I don't know what the proper
# result should be, but it shouldn't be to infloop, which is
# what used to happen!
self.check(
'To: <[smtp:[email protected]][email protected]>\n'
'Date: Wed, 13 Jan 1999 23:57:35 -0500\n'
'\n'
'test',
[('', ''),
('', '[email protected]'),
('', '[email protected]'),
])
def test_commas_in_full_name(self):
# This exercises the old commas-in-a-full-name bug, which
# should be doing the right thing in recent versions of the
# module.
self.check(
'To: "last, first" <[email protected]>\n'
'\n'
'test',
[('last, first', '[email protected]')])
def test_quoted_name(self):
self.check(
'To: (Comment stuff) "Quoted name"@somewhere.com\n'
'\n'
'test',
[('Comment stuff', '"Quoted name"@somewhere.com')])
def test_bogus_to_header(self):
self.check(
'To: :\n'
'Cc: [email protected]\n'
'Date: Wed, 13 Jan 1999 23:57:35 -0500\n'
'\n'
'test',
[('', '[email protected]')])
def test_addr_ipquad(self):
self.check(
'To: guido@[132.151.1.21]\n'
'\n'
'foo',
[('', 'guido@[132.151.1.21]')])
def test_rfc2822_phrases(self):
# RFC 2822 (the update to RFC 822) specifies that dots in phrases are
# obsolete syntax, which conforming programs MUST recognize but NEVER
# generate (see $4.1 Miscellaneous obsolete tokens). This is a
# departure from RFC 822 which did not allow dots in non-quoted
# phrases.
self.check('To: User J. Person <[email protected]>\n\n',
[('User J. Person', '[email protected]')])
# This takes to long to add to the test suite
## def test_an_excrutiatingly_long_address_field(self):
## OBSCENELY_LONG_HEADER_MULTIPLIER = 10000
## oneaddr = ('Person' * 10) + '@' + ('.'.join(['dom']*10)) + '.com'
## addr = ', '.join([oneaddr] * OBSCENELY_LONG_HEADER_MULTIPLIER)
## lst = rfc822.AddrlistClass(addr).getaddrlist()
## self.assertEqual(len(lst), OBSCENELY_LONG_HEADER_MULTIPLIER)
def test_parseaddr(self):
eq = self.assertEqual
eq(rfc822.parseaddr('<>'), ('', ''))
eq(rfc822.parseaddr('[email protected]'), ('', '[email protected]'))
eq(rfc822.parseaddr('[email protected] (Bea A. Person)'),
('Bea A. Person', '[email protected]'))
eq(rfc822.parseaddr('Cynthia Person <[email protected]>'),
('Cynthia Person', '[email protected]'))
def test_main():
test_support.run_unittest(MessageTestCase)
if __name__ == "__main__":
test_main()
| [
"[email protected]"
] | |
a2f07df3af8ee8974943f2f72af4f1d8e8c2c4f0 | 12091b1c0723759464f949b0a47b305c76549278 | /tests/test_pedreader.py | ae00fa7fdd2ada7ea03426fe560e3525311546bc | [
"MIT"
] | permissive | whatshap/whatshap | 6311e13d36210f395206683bb00b2054ef639653 | 15c9ff8c4f5b04b86195396dbc6620c874b5ceb8 | refs/heads/main | 2023-09-04T07:58:09.567203 | 2023-08-31T08:45:45 | 2023-08-31T08:45:45 | 276,673,862 | 254 | 27 | MIT | 2023-09-10T06:47:19 | 2020-07-02T14:53:00 | Python | UTF-8 | Python | false | false | 883 | py | import io
from pytest import raises
from whatshap.pedigree import PedReader, Trio, ParseError
class TestPedReader:
def test_parse(self):
trios = list(PedReader("tests/data/pedigree.ped"))
assert trios[0] == Trio(child="child1", mother="mother", father="father")
assert trios[1] == Trio(child="child2", mother="mother", father="father")
assert trios[2] == Trio(child="father", mother=None, father=None)
assert trios[3] == Trio(child="mother", mother=None, father=None)
assert trios[4] == Trio(child="orphan", mother=None, father=None)
def test_parse_error(self):
f = io.StringIO("buggy file")
with raises(ParseError):
list(PedReader(f))
def test_duplicate_individual(self):
f = io.StringIO("f1 c m f 0 1\nf1 c m f 0 1")
with raises(ParseError):
list(PedReader(f))
| [
"[email protected]"
] | |
c095c9ea75894cf94711c006a112018463e2b36e | a80963fbac8c0edcef5b1b9bad67a4b5913cd569 | /tuple1.py | 5d2d9ca7cc039ecf65294f12eb7021528954ca32 | [] | no_license | manurp/Python_programs | 946877caf93a2ff0239c68dc4e8e02c72fe6f156 | 1a0c896b05b72afee7a48dd1bc2bef2aa7ffe0af | refs/heads/master | 2020-06-28T01:22:59.140092 | 2018-09-01T17:03:18 | 2018-09-01T17:03:18 | 97,080,367 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64 | py | n=input()
print(hash(tuple([int(i) for i in input().split()])))
| [
"[email protected]"
] | |
5bb44b04e369f75bbfa730979f359f87774d86b2 | f61cf1a24fa184dd552dd47dd8399b74c5816ee0 | /tasks/10/10-10.py | e06737083170e2f17c7ee5587f9bbf2b030af1e0 | [] | no_license | desve/netology | ea585d9db8658eea5319b98f63259239fa538fcb | c6039cc831058b8ba650d417fae25f761520139b | refs/heads/master | 2020-01-23T21:11:31.291807 | 2017-04-06T05:19:08 | 2017-04-06T05:19:08 | 74,572,766 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | # Полустепени вершин
n = 5
a= [[0, 0, 0, 0, 0],
[0, 0, 0, 0, 1],
[1, 1, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]
]
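# c1[i] = out-degree of vertex i (ones in row i); c2[i] = in-degree of vertex i
# (ones in column i). Output labels: "Вершина" = vertex, "Заходов" = incoming
# arcs, "Исходов" = outgoing arcs.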
c1 = [0] * n
for i in range(n):
c11 = 0
for j in range(n):
if a[i][j] == 1:
c11 += 1
c1[i] = c11
c2 = [0] * n
for i in range(n):
c22 = 0
for j in range(n):
if a[j][i] == 1:
c22 += 1
c2[i] = c22
for i in range(n):
print("Вершина", i+1)
print("Заходов", c2[i])
print("Исходов", c1[i])
print("--------------------")
| [
"[email protected]"
] | |
729f408065f93d454e748e77b04b04967fb88c26 | 5cc204e2ecb9a756127e7c71633a1edcdb3e989b | /pylmp/InKim/LAMMPS_getViscosity.py | 549a600e96d47dab74637bbb64c9f0a909aae361 | [] | no_license | hopefulp/sandbox | 1a1d518cf7b5e6bca2b2776be1cac3d27fc4bcf8 | 4d26767f287be6abc88dc74374003b04d509bebf | refs/heads/master | 2023-06-27T17:50:16.637851 | 2023-06-15T03:53:39 | 2023-06-15T03:53:39 | 218,209,112 | 1 | 0 | null | 2022-09-13T13:22:34 | 2019-10-29T05:14:02 | C++ | UTF-8 | Python | false | false | 3,560 | py | #!/home/noische/program/python27/bin/python
"""
LAMMPS_getViscosity.py
Original: Dec 28 2011 In Kim
"""
# Python Modules
import sys
import os
import string
import random
import time
import getopt
import pprint
# Custom Modules
sys.path.append("/home/noische/scripts")
sys.path.append("/home/noische/script")
import bgf
import bgftools
import nutils as nu
import scipy
# Globals
version = '111228'
def getViscosity(log_file, profile_file, out_file, silent=False):
"""
def template():
Write what this function does.
Function Parameters:
log_file A string of filename or BgfFile class.
profile_file A string of filename or BgfFile class.
"""
# Initialize
log_data = [];
profile_data = [];
boxlength = 0;
f_out_file = open(out_file, 'w')
# Load log_file (dP)
f_log_file = open(log_file);
while 1:
line = f_log_file.readline()
if not line:
break;
if "Step dp TotEng Temp" in line:
break;
if "Box length" in line and "print" not in line:
parse = line.split()
boxlength = float(parse[-1])
while 1:
line = f_log_file.readline()
if not line:
break;
# log_data: [Step dp TotEng Temp]
parse = line.split()
if len(parse) != 4:
break;
log_data.append([int(parse[0]), float(parse[1]), float(parse[2]), float(parse[3])])
# Load .profile and calculate dvx/dvz
f_profile_file = open(profile_file);
while 1:
timestep = 0; bin = 0;
vx = []; vz = [];
line = f_profile_file.readline()
if not line:
break;
if "#" in line:
continue;
parse = line.split()
if len(parse) == 2:
timestep = int(parse[0])
bin = int(parse[1])
# read vz-vx pairs
for i in range(0, bin):
dataline = f_profile_file.readline()
parsedata = dataline.split()
vz.append(float(parsedata[1])*boxlength)
vx.append(float(parsedata[3]))
if len(vz) != bin or len(vx) != bin:
nu.die("The number of vectors for linear regression in the profile file does not match.")
# regression of vx wrt vz (i.e. x = vz, y = vx in common case)
(ar, br) = scipy.polyfit(vz, vx, 1)
temp = [timestep, ar]
profile_data.append(temp)
#f_out_file.write(str(temp)+"\n") # profile reader looks good 2012.2.2
# merge two data: log and profile
# merged_data: [Step dp TotEng Temp (dvx/dvz)]
merged_data = [];
for item1 in log_data:
for item2 in profile_data:
if item1[0] == item2[0]:
temp = item1 + item2[1:]
merged_data.append(temp)
# viscosity = - dp / (dvx/dvz)
for i in merged_data:
vis = -1 * i[1] / i[4]
i.append(vis)
for i in merged_data:
line = "";
for j in i:
line += str(j) + "\t"
line += "\n"
f_out_file.write(line)
# close files
f_out_file.close()
### end of getViscosity
if __name__ == '__main__':
option = ""; args = ""; log_file = ""; size = 0.0; profile_file = ""; out_file = "";
number = 0
usage = """
Usage: LAMMPS_getViscosity.py -l logfile -p profile -o output
Options are:
-l Input LAMMPS log file (contains the dp output).
-p Input LAMMPS velocity profile file.
-o Output file (default: <log file basename>.output).
"""
if len(sys.argv) < 2:
print(usage); sys.exit(0)
options, args = getopt.getopt(sys.argv[1:], 'hl:p:o:', ['help','log=','profile=','out='])
for option, value in options:
if option in ('-h', '--help'):
print(usage); sys.exit(0)
elif option in ('-l', '--log'):
log_file = value
elif option in ('-o', '--output'):
out_file= value
elif option in ('-p', '--profile'):
profile_file = value
elif option in (''):
print(usage); sys.exit(0)
# default settings
if not out_file: out_file = os.path.basename(log_file).split(".log")[0] + "" + ".output"
getViscosity(log_file, profile_file, out_file, silent=False)
| [
"[email protected]"
] | |
4b5a64ff4dfa130bfdb064b4d689d22a6410ef8d | e8d719fe45dfbff9cbbc4ed872832cec6cabaca6 | /21_Merge_Two_Sorted_Lists.py | eb738c549da826985571891c40557a9c19c0cf19 | [] | no_license | nlfox/leetcode | 64f4f48d7f4be6df0542e51cc7037df40bf184a3 | d61363f99de3d591ebc8cd94f62544a31a026d55 | refs/heads/master | 2020-12-21T01:43:01.792899 | 2016-11-14T23:10:12 | 2016-11-14T23:10:12 | 56,680,839 | 2 | 0 | null | 2016-05-17T17:16:37 | 2016-04-20T11:19:58 | Python | UTF-8 | Python | false | false | 1,386 | py | # Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
def construct(l):
res = ListNode(l[0])
head = res
for i in l[1:]:
res.next = ListNode(i)
res = res.next
return head
def pri(node):
p = node
while p:
print p.val,
p = p.next
class Solution(object):
def mergeTwoLists(self, l1, l2):
"""
:type l1: ListNode
:type l2: ListNode
:rtype: ListNode
"""
p1, p2 = l1, l2
new = None
head = None
if p1 and p2:
new = ListNode(p2.val) if p1.val > p2.val else ListNode(p1.val)
head = new
if p1.val > p2.val:
p2 = p2.next
else:
p1 = p1.next
while p1 and p2:
if p1.val > p2.val:
new.next = ListNode(p2.val)
new = new.next
p2 = p2.next
else:
new.next = ListNode(p1.val)
new = new.next
p1 = p1.next
if p1:
new.next = p1
if p2:
new.next = p2
else:
head = p1 if p1 else p2
return head
pri(Solution().mergeTwoLists(construct([1, 2, 3]),construct([2, 3, 4])))
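# Expected output of the driver above: 1 2 2 3 3 4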
| [
"[email protected]"
] | |
f1290c1967e70ae6adf02d821fa34d407fc96a9a | e68fc7302d123d26f9e1d49c7877a3c2367cf676 | /config.py | 736f1d23ed37ee8f5ae1c0e589c3cb4efcf23da0 | [
"MIT"
] | permissive | bmeares/sso | 56ae3fb4336f4864e346d3cc366117b96e3f3a0c | d589098c6b6c8510815669184da84e0b561df90d | refs/heads/master | 2023-06-01T10:09:04.656903 | 2021-06-05T17:01:15 | 2021-06-05T17:01:15 | 364,969,061 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,802 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
from meerschaum.utils.prompt import prompt
from meerschaum.config import get_plugin_config, write_plugin_config
GOOGLE_CONF_URL = 'https://accounts.google.com/.well-known/openid-configuration'
FACEBOOK_CONF_DICT = {
'api_base_url': 'https://graph.facebook.com/v10.0/',
'access_token_url': 'https://graph.facebook.com/v10.0/oauth/access_token',
'authorize_url': 'https://www.facebook.com/v10.0/dialog/oauth',
'userinfo_endpoint': 'me?fields=id,name,first_name,middle_name,last_name,email,website,gender,locale',
# 'response_type': 'token',
# 'state': "{st=state123abc,ds=123456789}",
}
def get_sso_config(*args, **kw):
_cf = get_plugin_config(*args, warn=False, **{k: v for k, v in kw.items() if k != 'warn'})
if _cf is None:
_db_label = prompt('Wedding database label:', default='wedding_s')
_prepend = prompt('Prepend path to /sso (blank for the root to be /sso):')
_google_id = prompt('Google Client ID:')
_google_secret = prompt('Google Client Secret:', is_password=True)
_google_callback = prompt('Google Callback URL:')
_facebook_id = prompt('Facebook App ID:')
_facebook_secret = prompt('Facebook App Secret:')
_facebook_callback = prompt('Facebook Callback URL:')
_cf = {
'prepend' : _prepend,
'google' : {
'id' : _google_id, 'secret' : _google_secret, 'callback' : _google_callback,
},
'facebook' : {
'id' : _facebook_id, 'secret' : _facebook_secret, 'callback' : _facebook_callback,
},
}
write_sso_config(_cf)
return get_plugin_config(*args, **kw)
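# Editor's note: on first run (no stored plugin config) get_sso_config() prompts
# for the database label, path prefix and Google/Facebook OAuth credentials,
# persists them via write_sso_config(), then re-reads them with get_plugin_config().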
def write_sso_config(config, **kw):
write_plugin_config(config)
| [
"[email protected]"
] | |
4dc66b762f786b20e0db445dc0eefe4d7163e1b7 | e10a6d844a286db26ef56469e31dc8488a8c6f0e | /dedal/train/training_loop.py | f371d2facd4a2c4143d83f03d601fae58f31ef3b | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | Jimmy-INL/google-research | 54ad5551f97977f01297abddbfc8a99a7900b791 | 5573d9c5822f4e866b6692769963ae819cb3f10d | refs/heads/master | 2023-04-07T19:43:54.483068 | 2023-03-24T16:27:28 | 2023-03-24T16:32:17 | 282,682,170 | 1 | 0 | Apache-2.0 | 2020-07-26T15:50:32 | 2020-07-26T15:50:31 | null | UTF-8 | Python | false | false | 15,902 | py | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Custom training loop for all sequence alignments experiments."""
import enum
import itertools
from typing import Iterator, Mapping, Optional, Sequence, Tuple, Type, Union
from absl import logging
import gin
import tensorflow as tf
import tensorflow_datasets as tfds
from dedal import multi_task
from dedal.data import builder
from dedal.train import checkpoint
from dedal.train import logger
Builder = Union[builder.DatasetBuilder, builder.MultiDatasetBuilder]
Example = Mapping[str, tf.Tensor]
def get_strategy(use_tpu = False,
tpu_job_name = 'tpu_worker',
master = 'local'):
"""Builds the proper strategy based on the parameters.
Args:
use_tpu: whether the job must be trained on tpu or not.
    tpu_job_name: the name of the TPU job, if applicable.
master: the master job name.
Returns:
A tf.distribute.Strategy.
"""
use_remote_eager = master and master != 'local'
if use_tpu:
logging.info('Use TPU at %s with job name "%s".', master, tpu_job_name)
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu=master, job_name=tpu_job_name)
if use_remote_eager:
tf.config.experimental_connect_to_cluster(resolver)
logging.warning('Remote eager configured. Remote eager can be slow.')
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
else:
if use_remote_eager:
tf.config.experimental_connect_to_host(master, job_name='gpu_worker')
logging.warning('Remote eager configured. Remote eager can be slow.')
gpus = tf.config.experimental.list_logical_devices(device_type='GPU')
if gpus:
logging.info('Found GPUs: %s', gpus)
strategy = tf.distribute.MirroredStrategy()
else:
strategy = tf.distribute.OneDeviceStrategy('CPU')
return strategy
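# Example usage (a sketch; `my_dataset_builder`, the workdir and the task are
# placeholders, not values defined in this module):
#   strategy = get_strategy(use_tpu=False)
#   loop = TrainingLoop(workdir='/tmp/dedal_run', strategy=strategy,
#                       dataset_builder=my_dataset_builder)
#   loop.run(Task.TRAIN)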
@gin.constants_from_enum
class Task(enum.Enum):
TRAIN = 0
EVAL = 1
DOWNSTREAM = 2
@gin.configurable
class TrainingLoop:
"""Hand made training loop."""
def __init__(self,
workdir,
strategy,
dataset_builder,
logger_cls = logger.Logger,
model_cls = None,
loss_fn = None,
optimizer_cls = None,
batch_size = 128,
num_steps = 10_000,
num_eval_steps = None,
num_steps_per_train_iteration = 10,
graph_mode = True,
eval_in_train_job = True,
reference_workdir = None,
num_reference_steps = None):
self._workdir = workdir
self.strategy = strategy
self._dataset_builder = dataset_builder
self._logger_cls = logger_cls
self._loss_fn = loss_fn
self._model_cls = model_cls
self._optimizer_cls = optimizer_cls
self._batch_size = batch_size
self._num_steps = num_steps
self._num_eval_steps = num_eval_steps
self._num_steps_per_train_iteration = num_steps_per_train_iteration
self._graph_mode = graph_mode
self._eval_in_train_job = eval_in_train_job
with self.strategy.scope():
self.model = self._model_cls()
self._step = tf.Variable(0, dtype=tf.int64, trainable=False, name='step')
self._optimizer = optimizer_cls() if optimizer_cls is not None else None
self._checkpointer = checkpoint.Checkpointer(
self._workdir, self.strategy, self.model, self._step, self._optimizer)
# For eval / downstream jobs, the reference checkpointing.
if reference_workdir is not None:
with self.strategy.scope():
self._reference_step = tf.Variable(
0, dtype=tf.int64, trainable=False, name='ref_step')
self._num_reference_steps = num_reference_steps
self._reference_ckpt = checkpoint.Checkpointer(
reference_workdir, self.strategy, self.model, self._reference_step)
else:
self._reference_ckpt = None
self._reference_step = None
self._num_reference_steps = None
def run(self, task = Task.TRAIN):
"""Run the training loop for the given task."""
tasks = {
Task.TRAIN: self.train,
Task.EVAL: self.evaluate,
Task.DOWNSTREAM: self.downstream
}
task = task if isinstance(task, Task) else Task[task.upper()]
task_fn = tasks.get(task, None)
if task_fn is None:
raise ValueError(
f'Unknown task {task}. Possible values are '
f'{[t.name for t in tasks.keys()]}')
task_fn()
@property
def _learning_rate(self):
lr = self._optimizer.lr
return lr if not callable(lr) else lr(self._step)
@gin.configurable(module='TrainingLoop')
def train_step(self,
inputs,
log,
training = True):
"""Runs one training step."""
def step_fn(features, y_true, weights, metadata):
"""step_fn is replicated when running with TPUStrategy."""
with tf.GradientTape() as tape:
y_pred = self.model(features, training=training)
local_loss, individual_losses = self._loss_fn(y_true, y_pred, weights)
local_loss += sum(self.model.losses)
replica_ctx = tf.distribute.get_replica_context()
grads = tape.gradient(
local_loss, self.model.trainable_variables,
unconnected_gradients=tf.UnconnectedGradients.ZERO)
grads = replica_ctx.all_reduce('sum', grads)
self._optimizer.apply_gradients(
grads_and_vars=zip(grads, self.model.trainable_variables),
experimental_aggregate_gradients=False)
loss = replica_ctx.all_reduce('sum', local_loss)
individual_losses = {k: replica_ctx.all_reduce('sum', v)
for k, v in individual_losses.items()}
grad_norm = tf.linalg.global_norm(grads)
log.update_mean('loss', loss)
for k, v in individual_losses.items():
log.update_mean(k, v)
log.update_mean('gradient_norm', grad_norm)
# TODO(fllinares, oliviert): do not average LR??
log.update_mean('learning_rate', self._learning_rate)
for m in self.model.metrics:
log.update_mean(m.name, m.result())
log.update(y_true, y_pred, weights, metadata)
self._step.assign_add(1)
for _ in tf.range(self._num_steps_per_train_iteration):
x, y_true, weights, metadata = next(inputs)
self.strategy.run(step_fn, args=[x, y_true, weights, metadata])
def parse_eval_splits(self, verbose = True):
"""Returns preconfigured list of `split` args for `dataset_builder.make`."""
# Uses eval splits configured in the dataset builder. If none are present,
# defaults to `tfds.Split.TEST`.
split = self._dataset_builder.split
split = tfds.Split.TEST if split is None else split
# In single-input mode, i.e. when dataset_builder is a `DatasetBuilder`
# instance, `split` can be
# + a `str` (one eval split),
# + a `Sequence[str]` (multiple eval splits within the same job).
# In multi-input mode, i.e. when dataset_builder is a `MultiDatasetBuilder`,
# instance, `split` can be
# + a `str` (one eval split, all subdatasets share the same split name),
# + a `Sequence[str]` (multiple eval splits within the same job, all
# subdatasets share the same split name),
# + a `Sequence[Sequence[str]]` (multiple eval splits within the same job,
# each subdataset configured with a different split name)
splits = (split,) if isinstance(split, str) else tuple(split)
if verbose:
for i, split in enumerate(splits):
split = (split,) if isinstance(split, str) else tuple(split)
logging.info(
'Eval splits (%d / %d): %s.', i + 1, len(splits), ', '.join(split))
return splits
def make_ds(self, split=None):
return self._dataset_builder.make(split, self._batch_size, self.strategy)
def make_logger(self, split, task, dummy = False):
if dummy:
return logger.DummyLogger()
split = split if isinstance(split, str) else ','.join(split)
return self._logger_cls(self._workdir, self.strategy, split, task)
def train(self, freeze = False, silent = False):
"""Trains the network."""
train_step_fn = (tf.function(self.train_step) if self._graph_mode
else self.train_step)
logging.info('Starting training.')
train_split = tfds.Split.TRAIN
train_examples = iter(self.make_ds(train_split))
logging.info('train: train dataset ready.')
eval_ds = None
if self._eval_in_train_job:
eval_splits = self.parse_eval_splits()
eval_ds = [self.make_ds(split) for split in eval_splits]
logging.info('train: eval dataset(s) ready.')
eval_logs = [self.make_logger(split, 'evaluate') for split in eval_splits]
train_log = self.make_logger(train_split, 'train', dummy=silent)
first_inputs, _, _, _ = next(train_examples)
self.may_transfer(first_inputs, freeze=freeze)
self._checkpointer.restore()
while self._step.numpy() < self._num_steps:
train_step_fn(train_examples, log=train_log)
step = self._step.numpy()
logged = train_log.log_and_reset(step, step >= self._num_steps)
self._checkpointer.may_save(step >= self._num_steps)
if logged and eval_ds is not None:
for ds, log in zip(eval_ds, eval_logs):
self.evaluate_once(ds, log)
train_log.restart_clock()
# Just for debug.
if step < 10:
logging.info('Train step %i completed', step)
@gin.configurable(module='TrainingLoop')
def eval_step(self, inputs, y_true, weights, metadata, log, training=False):
"""Run a single eval step, in a distributed strategy."""
def step_fn(x, y_true, weights, metadata):
y_pred = self.model(x, training=training)
local_loss, individual_losses = self._loss_fn(y_true, y_pred, weights)
local_loss += sum(self.model.losses)
replica_ctx = tf.distribute.get_replica_context()
loss = replica_ctx.all_reduce('sum', local_loss)
individual_losses = {k: replica_ctx.all_reduce('sum', v)
for k, v in individual_losses.items()}
log.update_mean('loss', loss)
for k, v in individual_losses.items():
log.update_mean(k, v)
log.update(y_true, y_pred, weights, metadata)
self.strategy.run(step_fn, args=[inputs, y_true, weights, metadata])
def evaluate_once(self, ds, log):
"""Evaluate by passing once through the dataset."""
# TODO(oliviert, fllinares): try jit_compile=True.
eval_step_fn = (tf.function(self.eval_step) if self._graph_mode
else self.eval_step)
for x, y_true, weights, metadata in itertools.islice(
ds, 0, self._num_eval_steps):
eval_step_fn(x, y_true, weights, metadata, log)
log.log_and_reset(self._step.numpy(), force=True)
@gin.configurable(module='TrainingLoop')
def evaluate(self, finetune_fn=None):
"""Evaluates the trained network by reading the train checkpoints.
Args:
finetune_fn: A (typically, gin-configured) callable that takes a
TrainingLoop object as its first argument. Its main purpose is to allow
arbitrary postprocessing of the model prior to eval. Note, however, that
these changes will *not* be persistent *nor* saved as a checkpoint.
"""
logging.info('Starting evaluation.')
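    # A finetune_fn might, for example, simply unfreeze part of the model:
    #   @gin.configurable
    #   def unfreeze_encoder(loop):
    #     loop.model.encoder.trainable = True
    # It is invoked below as finetune_fn(loop=self), so it must accept `loop`.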
splits = self.parse_eval_splits()
eval_ds = [self.make_ds(split) for split in splits]
logging.info('evaluate: eval dataset(s) ready.')
eval_logs = [self.make_logger(split, 'evaluate') for split in splits]
ckpt = (self._checkpointer if self._reference_ckpt is None
else self._reference_ckpt)
step = self._step if self._reference_step is None else self._reference_step
while step.numpy() < self._num_steps - 1:
ckpt.restore_after(step.numpy())
if finetune_fn is not None:
logging.info('evaluate: executing finetune_fn...')
finetune_fn(loop=self)
logging.info('evaluate: finetune_fn completed.')
self._step.assign(step)
for ds, log in zip(eval_ds, eval_logs):
# TODO(fllinares): double-check this doesn't mess with the clocks.
log.restart_clock()
self.evaluate_once(ds, log)
def downstream(self):
"""Runs a downstream task (train and test) based on upstream checkpoints."""
logging.info('Starting downstream.')
if self._reference_ckpt is None:
logging.info('No reference pass for the upstream task')
# Dataset and logger for train and test.
eval_splits = self.parse_eval_splits()
ds_logs = [(self.make_ds(split), self.make_logger(split, 'downstream'))
for split in (tfds.Split.TRAIN,) + eval_splits]
while self._reference_step.numpy() < self._num_reference_steps - 1:
# Re-initializes model, preventing weights in head being trained from
# being re-used from the last train-eval cycle.
with self.strategy.scope():
self.model = self._model_cls()
self._checkpointer.set_model(self.model)
self._reference_ckpt.set_model(self.model)
# TODO(oliviert): Consider instead having one logging folder per upstream
# step to save the whole downstream learning curve.
self.train(freeze=True, silent=True)
# Logs at the proper upstream step and the end of training.
self._step.assign(self._reference_step)
for ds, log in ds_logs:
self.evaluate_once(ds, log)
# Enables re-training from scratch.
self._checkpointer.delete()
self._step.assign(0)
@gin.configurable(module='TrainingLoop')
def may_transfer(
self,
inputs,
freeze = False,
reset_head = False):
"""Tries to restore the weights from the reference model, if exists."""
logging.info('Trying to transfer weights.')
if self._reference_ckpt is None:
return
logging.info('Transferring weights from %s',
self._reference_ckpt._workdir) # pylint: disable=protected-access
if isinstance(reset_head, bool):
reset_head = self.model.heads.constant_copy(reset_head)
# Initializes the weights to be able to restore them.
@tf.function
def predict(inputs):
self.model(inputs)
self.strategy.run(predict, args=[inputs])
# Backs up random init vals of params of output heads that are not to be
# restored from the reference checkpoint.
heads_init_vars = reset_head.constant_copy([])
for head, head_init_vars, reset_head_flag in zip(
self.model.heads, heads_init_vars, reset_head):
if reset_head_flag:
head_init_vars.extend([v.value() for v in head.variables])
self._reference_ckpt.restore_after(self._reference_step.numpy())
# TODO(oliviert): make this more configurable.
if freeze:
self.model.encoder.trainable = False
if self.model.aligner is not None:
self.model.aligner.trainable = False
logging.info('Weights transferred complete.')
# Optionally, reinitializes (a subset of) the output heads.
for head, head_init_vars, reset_head_flag in zip(
self.model.heads, heads_init_vars, reset_head): # pylint: disable=protected-access
if reset_head_flag:
for var, init_var in zip(head.variables, head_init_vars):
var.assign(init_var)
logging.info('Output head %s was reset.', head.name)
| [
"[email protected]"
] | |
2af7e96fce26a0c36dc8219da9028f0f0366eac9 | 3940b4a507789e1fbbaffeb200149aee215f655a | /warmUpOC/binaryHeap-RandomArraytoMaxheap.py | 87be6ff429535ca6536ad1e160d1c999e33f5d6b | [] | no_license | akimi-yano/algorithm-practice | 15f52022ec79542d218c6f901a54396a62080445 | 1abc28919abb55b93d3879860ac9c1297d493d09 | refs/heads/master | 2023-06-11T13:17:56.971791 | 2023-06-10T05:17:56 | 2023-06-10T05:17:56 | 239,395,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,726 | py | # Binary Heap - Random Array to Maxheap
#### Prompt:
Given an array of integers in random order, rearrange the elements so that the
array is a maxheap.
What is the Big O runtime of this algorithm? Please provide your rationale
as well.
#### Examples:
```
Example 1:
arr = [3, 6, 4, 2, 7, 5, 1]
return [7, 6, 5, 2, 3, 4, 1]
Example 2:
arr = [1, 2, 3, 4, 5, 6, 7, 8]
return [8, 5, 7, 4, 1, 6, 3, 2]
```
#### Input:
`arr` = `Array of Integers`
#### Output:
`result` = `Array of Integers`
#### Constraints:
**Time**: `??`
**Space**: `O(1)`
Where `n` is the length of the `arr` array
Perform the algorithm in place on the input array, and then return this
input array
#### Resources:
[Binary Heaps](http://eloquentjavascript.net/1st_edition/appendix2.html)
#### Hints:
Refer back to the Minheap implementation from your homework
When proving the Big O runtime, consider drawing out the binary tree
representation of a maxheap versus the array representation
#### Solution:
[//]: {{{
```Javascript
function convert(arr) {
function getChild(parent) {
let child1 = 2 * parent + 1;
let child2 = 2 * parent + 2;
if (child1 >= arr.length) {
return child1;
} else if (child2 >= arr.length) {
return child1;
} else if (arr[child1] > arr[child2]) {
return child1;
} else {
return child2;
}
}
function bubbleDown(parent) {
let child = getChild(parent);
while (child < arr.length && arr[parent] < arr[child]) {
[arr[child], arr[parent]] = [arr[parent], arr[child]];
parent = child;
child = getChild(parent);
}
}
let i = arr.length;
while (i--) {
bubbleDown(i);
}
return arr;
}
```
[//]: ---
YOUR WORK HERE
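A bottom-up heapify in Python (one possible sketch, mirroring the JavaScript
solution above; it produces the same arrays for the two examples). Sifting every
index down, starting from the last one, builds the maxheap in O(n) time overall
and O(1) extra space, since most nodes sit near the leaves and only sink a few
levels.

```Python
def convert(arr):
    n = len(arr)

    def bubble_down(parent):
        while True:
            child = 2 * parent + 1                 # left child
            if child >= n:                         # no children: done
                return
            right = child + 1
            if right < n and arr[right] > arr[child]:
                child = right                      # pick the larger child
            if arr[parent] >= arr[child]:          # heap property already holds
                return
            arr[parent], arr[child] = arr[child], arr[parent]
            parent = child

    for i in range(n - 1, -1, -1):                 # leaves first, root last
        bubble_down(i)
    return arr
```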
[//]: }}}
| [
"[email protected]"
] | |
ec6a13a47cbceadb43a7cf88141a8bbd15d35e42 | 47d3e3149269277b164fecb176b5d0297d398b2e | /Python_coding_dojang/Unit 45/package01.py | 26e03fb39f10ee0e5620aa19a6b0855bab0ab67a | [] | no_license | heechul90/study-python-basic-1 | 325e8c81fe35cd0cd22934869413e475b6734652 | 82d778e5960c0bde102bdc4c52fc61f61ba27745 | refs/heads/master | 2022-10-31T07:03:54.213599 | 2022-10-24T10:54:40 | 2022-10-24T10:54:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,923 | py | ### Unit 45. 모듈과 패키지 만들기
## 45.3 Creating a package
## A module is a single script file, whereas a package is organized as a folder (directory).
# calcpkg/__init__.py
# # The __init__.py file can be left empty
## If a folder (directory) contains an __init__.py file, that folder is recognized as a package.
## By default, the contents of __init__.py can be left empty
## (from Python 3.3 onward a folder is recognized as a package even without an __init__.py file,
## but writing an __init__.py file is recommended for compatibility with older versions).
## 45.3.1 Creating modules inside the package
## The first module is the operation module, which contains addition and multiplication functions,
## and the second is the geometry module, which contains functions for computing the areas of a triangle and a rectangle.
# calcpkg/operation.py
# def add(a, b):
#     return a + b
#
# def mul(a, b):
#     return a * b
# calcpkg/geometry.py
# def triangle_area(base, height):
#     return base * height / 2
#
# def rectangle_area(width, height):
#     return width * height
## 45.3.2 Using the package
# import package.module
# package.module.variable
# package.module.function()
# package.module.Class()
import calcpkg.operation    # import the operation module from the calcpkg package
import calcpkg.geometry     # import the geometry module from the calcpkg package
print(calcpkg.operation.add(10, 20))    # use the add function of the operation module
print(calcpkg.operation.mul(10, 20))    # use the mul function of the operation module
print(calcpkg.geometry.triangle_area(30, 40))     # use the triangle_area function of the geometry module
print(calcpkg.geometry.rectangle_area(30, 40))    # use the rectangle_area function of the geometry module
## 45.3.3 Importing variables, functions and classes from a package's module with from import
## After importing a function (or variable, or class) from a package's module with from import,
# it can be used without prefixing the package and module names.
# from package.module import variable
# from package.module import function
# from package.module import Class
from calcpkg.operation import add, mul
add(10, 20)
mul(10, 20)
# Note | Package modules and __name__
# In a module that lives inside a package, the __name__ variable holds the name in package.module form.
# That is, if you make geometry.py in the calcpkg package print the value of __name__
# and then import it, it prints 'calcpkg.geometry'.
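# For example, if calcpkg/geometry.py starts with
#     print(__name__)
# then "import calcpkg.geometry" prints 'calcpkg.geometry',
# whereas running the file directly (python geometry.py) prints '__main__'.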
# Note | Where modules and packages are searched for
# So far, the modules and packages were created in the current folder (directory).
# If a module or package is not found in the current folder, Python searches for it in the following paths.
import sys
sys.path | [
"[email protected]"
] | |
96df125c2050e8380da4e03b47062197c37e68f7 | 5bd4893a793ed739127f15becd9558cacf461540 | /scripts/rot_photos.py | 5219086d03b68403f7db22b5c1aced82b54b5007 | [] | no_license | hauensteina/ahn-repo | d3aa665eeef846e426b866d587e8649c8283e74c | 93bd7c54548a083f39510fc562c9e7540c4f672a | refs/heads/master | 2023-07-24T05:34:51.289699 | 2023-07-13T16:10:25 | 2023-07-13T16:10:25 | 99,860,476 | 0 | 1 | null | 2023-07-15T01:33:35 | 2017-08-09T23:20:28 | Python | UTF-8 | Python | false | false | 2,121 | py | #!/usr/bin/env python
# Rotate all *JPG files in the current folder so that the image header
# matches the actual image rotation.
# Then you can rotate them manually for upload to hauenstein.nine.ch/andiconny .
# AHN, Jan 2020
from __future__ import division, print_function
import os,sys,re,glob,shutil
import subprocess
import argparse
from pdb import set_trace as BP
#---------------------------
def usage(printmsg=False):
name = os.path.basename(__file__)
msg = '''
Name:
%s -- Rotate jpeg images consistent with header
Synopsis:
%s --run
Description:
Rotate all *JPG files in the current folder so that the image header
matches the actual image rotation. Then you can rotate them manually
for upload to hauenstein.nine.ch/andiconny .
HEIC images are converted to jpg on the way.
Example:
%s --run
''' % (name,name,name)
if printmsg:
print(msg)
exit(1)
else:
return msg
#--------------
def main():
parser = argparse.ArgumentParser(usage=usage())
parser.add_argument("--run", required=True, action='store_true')
args = parser.parse_args()
IMG_FOLDER = '.'
images = glob.glob(IMG_FOLDER + '/*.jpg')
images += glob.glob(IMG_FOLDER + '/*.jpeg')
images += glob.glob(IMG_FOLDER + '/*.JPG')
images += glob.glob(IMG_FOLDER + '/*.JPEG')
images += glob.glob(IMG_FOLDER + '/*.HEIC')
ORIGFOLDER = 'orig'
if not os.path.exists( ORIGFOLDER):
os.mkdir( ORIGFOLDER)
for img in images:
shutil.move( img, ORIGFOLDER)
for img in images:
print( img)
inf = os.path.basename( img)
ext = os.path.splitext( inf)[1]
jpgfile = '%s.%s' % (os.path.splitext( inf)[0], 'jpg')
if ext == '.HEIC':
cmd = 'convert %s/%s %s/%s' % (ORIGFOLDER, inf, ORIGFOLDER, jpgfile)
subprocess.check_output( cmd, shell=True)
inf = jpgfile
cmd = 'ffmpeg -i %s/%s -c:a copy %s' % (ORIGFOLDER, inf, jpgfile)
subprocess.check_output( cmd, shell=True)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
2023fb2002eba629aa483150c449161bf19a08ed | 58b87ea29a95a5ceeaae4c2d7db1b16502ed158f | /ComputationalPhysics/Homework/hw3.py | d368ea50245383ff59eb1bac5299c93e92da3b2c | [] | no_license | meyerpa/Python | b609e8c036b478b20cd17a4cc47b71c129c968f8 | 3797f9be3341e69d5e9eccfc1b4e7f52fdd9c666 | refs/heads/master | 2021-01-01T03:58:40.183829 | 2018-03-14T14:24:57 | 2018-03-14T14:24:57 | 56,526,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,076 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 26 13:30:49 2017
@author: Paige Meyer
@date: 1-26-2016
@file: homework3
@description: This file contains code to read sunspots.txt,
show the number of months, graph the sunspots with respect to time,
and average the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from os.path import join
# format filename
filename = join("cpresources", "sunspots.txt")
# read the data from sunspots.txt
data = np.loadtxt(filename, float)
x = data[:, 0]
y = data[:, 1]
# take only first 1000 datapts
x = x[:1000]
y = y[:1000]
# calculate the average of ten points
avg = []
for i in x:
summ = 0
for j in np.linspace(i-5, i+5, 10):
if j >= 0 and j < len(x):
summ += y[int(j)]
avg.append(1/(2*5)*summ)
# plot stuff
plt.plot(x, y, color="r", alpha=.3, label="Sunspot count")
plt.plot(x, avg, color="c", label="Average sunspots")
# format plot
plt.legend()
plt.xlabel("month")
plt.ylabel("number of sunspots")
plt.title("Sunspots vs. time")
plt.show()
| [
"[email protected]"
] | |
7f87b5c5bed34bb76c1ee9f8face990205269f2d | de392462a549be77e5b3372fbd9ea6d7556f0282 | /accounts/migrations/0035_auto_20200910_1200.py | dac2d5c9008ca94f225b7cedf49b36e06ab4e2cf | [] | no_license | amutebe/AMMS_General | 2830770b276e995eca97e37f50a7c51f482b2405 | 57b9b85ea2bdd272b44c59f222da8202d3173382 | refs/heads/main | 2023-07-17T02:06:36.862081 | 2021-08-28T19:07:17 | 2021-08-28T19:07:17 | 400,064,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 732 | py | # Generated by Django 3.0.2 on 2020-09-10 09:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0034_auto_20200910_1126'),
]
operations = [
migrations.AlterField(
model_name='car',
name='car_number',
field=models.CharField(default='TEGA10092020946', max_length=200, primary_key=True, serialize=False, verbose_name='Corrective action no.:'),
),
migrations.AlterField(
model_name='employees',
name='employeeID',
field=models.CharField(default='TEGA239', max_length=10, primary_key=True, serialize=False, verbose_name='Employee ID'),
),
]
| [
"[email protected]"
] | |
d7d3c2d7ef21f281073e54af1f20b0d335a6d4a2 | 3c17e189622018329bc0ebd8523eae8db9f3112a | /ykdl/extractors/netease/live.py | 8a3b1f40c764c49483d13ce1bbf800566e2c18ed | [
"MIT"
] | permissive | YU-zreo/ykdl | 167c9b8715a1cecf57c18bf60c7da3b22437ad06 | b59dacd78bcec79d208d7cb86b86fa65428e386a | refs/heads/master | 2020-12-02T12:47:01.113309 | 2017-07-07T12:39:20 | 2017-07-07T12:39:20 | 96,594,712 | 1 | 0 | null | 2017-07-08T03:57:22 | 2017-07-08T03:57:21 | null | UTF-8 | Python | false | false | 912 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from ykdl.util.html import get_content
from ykdl.util.match import match1
from ykdl.extractor import VideoExtractor
from ykdl.videoinfo import VideoInfo
import json
class NeteaseLive(VideoExtractor):
name = u"网易直播 (163)"
def prepare(self):
info = VideoInfo(self.name, True)
if not self.vid:
html = get_content(self.url)
self.vid = match1(html, "anchorCcId : \'([^\']+)")
info.title = match1(html, "title: \'([^\']+)")
info.artist = match1(html, "anchorName : \'([^\']+)")
data = json.loads(get_content("http://cgi.v.cc.163.com/video_play_url/{}".format(self.vid)))
info.stream_types.append("current")
info.streams["current"] = {'container': 'flv', 'video_profile': "current", 'src' : [data["videourl"]], 'size': 0}
return info
site = NeteaseLive()
| [
"[email protected]"
] | |
810ad686960f4a19c7624ae8c49a2c551ec555b7 | 9e5452e9a8079125d2f89aedca7ca5b675171fee | /src/industries/rubber_plantation.py | 404260e3952ecc8087de4b62cb52bd265a5299ef | [] | no_license | RadarCZ/firs | c16f8b2faf3c770c873bab948adc0bd850156dd5 | da1d614c0a92b91978ff212015ed9d00c9f37607 | refs/heads/master | 2023-08-13T09:05:32.939857 | 2021-09-24T18:10:28 | 2021-09-24T18:10:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,899 | py | from industry import IndustryPrimaryOrganic, TileLocationChecks
industry = IndustryPrimaryOrganic(
id="rubber_plantation",
prod_cargo_types_with_multipliers=[("RUBR", 16)],
map_colour="121",
prob_in_game="4",
prob_map_gen="11",
prospect_chance="0.75",
name="TTD_STR_INDUSTRY_NAME_RUBBER_PLANTATION",
extra_text_fund="string(STR_FUND_RUBBER_PLANTATION)",
nearby_station_name="string(STR_STATION_TAPPERS_SHED)",
location_checks=dict(require_cluster=[72, 4]),
fund_cost_multiplier="54",
override_default_construction_states=True,
)
industry.economy_variations["IN_A_HOT_COUNTRY"].enabled = True
industry.economy_variations["IN_A_HOT_COUNTRY"].prod_cargo_types_with_multipliers = [
("RUBR", 16)
]
# industry.economy_variations['IN_A_HOT_COUNTRY'].prod_cargo_types_with_multipliers = [('LATX', 16)]
industry.add_tile(
id="rubber_plantation_tile_1",
foundations="return CB_RESULT_NO_FOUNDATIONS",
autoslope="return CB_RESULT_NO_AUTOSLOPE",
location_checks=TileLocationChecks(
disallow_above_snowline=True,
disallow_coast=True,
disallow_industry_adjacent=True,
),
)
industry.add_tile(
id="rubber_plantation_tile_2", # house
autoslope="return CB_RESULT_AUTOSLOPE",
location_checks=TileLocationChecks(
disallow_above_snowline=True,
disallow_coast=True,
disallow_industry_adjacent=True,
),
)
sprite_ground = industry.add_sprite(sprite_number=3962)
spriteset_ground_overlay = industry.add_spriteset(type="empty")
spriteset_1 = industry.add_spriteset(
sprites=[(10, 10, 64, 59, -31, -28)],
)
spriteset_2 = industry.add_spriteset(
sprites=[(80, 10, 64, 59, -31, -28)],
)
industry.add_spritelayout(
id="rubber_plantation_house_spritelayout",
ground_sprite=sprite_ground,
ground_overlay=spriteset_ground_overlay,
building_sprites=[spriteset_1],
)
industry.add_spritelayout(
id="rubber_plantation_shed_spritelayout",
ground_sprite=sprite_ground,
ground_overlay=spriteset_ground_overlay,
building_sprites=[spriteset_2],
)
industry.add_magic_spritelayout(
type="slope_aware_trees",
base_id="rubber_plantation_slope_aware_ground_with_trees_1",
config={"ground_sprite": 4145, "trees_default": [1908, 1908, 1908, 1908]},
)
industry.add_magic_spritelayout(
type="slope_aware_trees",
base_id="rubber_plantation_slope_aware_ground_with_trees_2",
config={"ground_sprite": 4145, "trees_default": [1906, 1905, 1905, 1907]},
)
industry.add_industry_layout(
id="rubber_plantation_layout_1",
layout=[
(
0,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
2,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(1, 2, "rubber_plantation_tile_2", "rubber_plantation_shed_spritelayout"),
(
2,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_2",
),
(2, 2, "rubber_plantation_tile_2", "rubber_plantation_house_spritelayout"),
],
)
industry.add_industry_layout(
id="rubber_plantation_layout_2",
layout=[
(
0,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(0, 2, "rubber_plantation_tile_2", "rubber_plantation_shed_spritelayout"),
(
0,
3,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
2,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
3,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_2",
),
(1, 4, "rubber_plantation_tile_2", "rubber_plantation_house_spritelayout"),
],
)
industry.add_industry_layout(
id="rubber_plantation_layout_3",
layout=[
(
0,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_2",
),
(
1,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
2,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
2,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(3, 0, "rubber_plantation_tile_2", "rubber_plantation_shed_spritelayout"),
(
3,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_2",
),
(
4,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(4, 1, "rubber_plantation_tile_2", "rubber_plantation_house_spritelayout"),
],
)
industry.add_industry_layout(
id="rubber_plantation_layout_4",
layout=[
(
0,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
3,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
4,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
3,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
4,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
3,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(3, 1, "rubber_plantation_tile_2", "rubber_plantation_shed_spritelayout"),
(
3,
3,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
3,
4,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
4,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_2",
),
(4, 1, "rubber_plantation_tile_2", "rubber_plantation_house_spritelayout"),
(
4,
3,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
4,
4,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
],
)
industry.add_industry_layout(
id="rubber_plantation_layout_5",
layout=[
(
0,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
2,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
4,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
0,
5,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(1, 2, "rubber_plantation_tile_2", "rubber_plantation_shed_spritelayout"),
(
1,
4,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
5,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
1,
6,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
2,
0,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
2,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(2, 2, "rubber_plantation_tile_2", "rubber_plantation_house_spritelayout"),
(
2,
4,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
2,
5,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
2,
6,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
3,
1,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
3,
2,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_2",
),
(
3,
5,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
(
3,
6,
"rubber_plantation_tile_1",
"rubber_plantation_slope_aware_ground_with_trees_1",
),
],
)
| [
"[email protected]"
] | |
5660cf373fd81ac3f88d952f37b7290ad5c9e660 | 5c5e7b03c3373e6217665842f542ca89491290ff | /2015/day24.py | da34556a0fac89ed5e4bf110b667f266d5fe3ae1 | [] | no_license | incnone/AdventOfCode | 9c35214e338e176b6252e52a25a0141a01e290c8 | 29eac5d42403141fccef3c3ddbb986e01c89a593 | refs/heads/master | 2022-12-21T21:54:02.058024 | 2022-12-15T17:33:58 | 2022-12-15T17:33:58 | 229,338,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | from getinput import get_input
import itertools
import copy
def list_prod(x):
prod = 1
for s in x:
prod *= s
return prod
def partitionable(weights):
weight_sum = sum(w for w in weights)
if weight_sum % 2 != 0:
return False
n = len(weights)
k = weight_sum // 2
# p[i][j] = There exists a subset of the first j weights summing to i (hence, we want to know p[k][n])
p = [[False for _ in range(n + 1)] for _ in range(k + 1)]
for j in range(len(p[0])):
p[0][j] = True
# Fill out one row at a time
for i in range(1, k + 1):
for j in range(1, n + 1):
# If the next weight isn't too large, then we can make i either by using this weight and prior weights,
# or by only using prior weights
if (i - weights[j-1]) >= 0:
p[i][j] = p[i][j-1] or p[i - weights[j-1]][j-1]
# Otherwise, the only way to make a weight of i is with the weights before this one
else:
p[i][j] = p[i][j-1]
return p[k][n]
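# Example: partitionable([3, 1, 1, 2, 2, 1]) is True ({3, 2} and {1, 1, 2, 1} both
# sum to 5), while partitionable([1, 1, 3]) is False because the total weight is odd.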
def balanceable(subset, weights):
remaining_weights = [w for w in weights if w not in subset]
desired_weight = sum(subset)
if sum(remaining_weights) != 2*desired_weight:
return False
    return partitionable(remaining_weights)
def sums_exist_hlpr(weights, idx, sums, cache):
"""Check whether the set weights[:idx+1] can be split into sets with the sums given in sums. Use cache
to store the result of computations."""
if (idx, sums) in cache:
return cache[(idx, sums)]
if not any(x != 0 for x in sums):
return True
if idx < 0:
return False
sums_exist = False
for jdx in range(len(sums)):
remainder = sums[jdx] - weights[idx]
if remainder >= 0:
sums_exist = sums_exist \
or sums_exist_hlpr(weights, idx-1, sums[:jdx] + (remainder,) + sums[jdx+1:], cache)
cache[(idx, sums)] = sums_exist
return sums_exist
def tripartitionable(weights):
wsum = sum(weights)
if wsum % 3 != 0:
return False
n = len(weights)
cache = dict()
answer = sums_exist_hlpr(weights, n-1, (wsum//3, wsum//3, wsum//3), cache)
return answer
def parse_input(s):
weights = []
for line in s.splitlines(keepends=False):
weights.append(int(line))
return weights
def part_1(weights):
for subset_size in range(1, len(weights)+1):
subsets = sorted(itertools.combinations(weights, subset_size), key=lambda x: list_prod(x))
for subset in subsets:
if balanceable(subset, weights):
return list_prod(subset)
return None
def part_2(weights):
packagesum = sum(weights)
assert packagesum % 4 == 0
for subset_size in range(1, len(weights)+1):
subsets = sorted(itertools.combinations(weights, subset_size), key=lambda x: list_prod(x))
for subset in subsets:
if sum(subset) != packagesum // 4:
continue
if tripartitionable([w for w in weights if w not in subset]):
return list_prod(subset)
return None
if __name__ == "__main__":
the_pkg_weights = parse_input(get_input(24))
print('Part 1:', part_1(the_pkg_weights))
print('Part 2:', part_2(the_pkg_weights))
| [
"[email protected]"
] | |
1bd16f88bb8cf77b42c10f23cb961dac40c8112e | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/torch/nn/parallel/scatter_gather.py | 022b96bf08f30d37561e898459ab0e809d0e29ed | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:b69f556cfd2c67160d3730c231d1bb6d26eaf9dcc7d69128f5c480af8679521d
size 2690
| [
"[email protected]"
] | |
6a817ecffb86c41f42b6deea0488e93cad9bc8f1 | 0760b6a6912914dda2923f0fa79fc1a6571ef376 | /turtle/graphics pattern.py | 23c8f87a8e6efd6e58bebc6843f0815740aa7cc1 | [] | no_license | PrateekJain999/Python-Codes | 030cc8aedcea52e3142d545fb92aeff5f895ca5f | 21e2f5a135e9646ac8fb5845ad10bc6bbf3c23c7 | refs/heads/main | 2023-02-26T11:11:28.951296 | 2021-02-04T11:36:09 | 2021-02-04T11:36:09 | 331,254,596 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | import turtle as t
t.setpos(0,0)
t.circle(50)
t.penup()
t.setpos(-50,50)
t.pendown()
t.fd(100)
t.penup()
t.rt(90)
t.setpos(0,100)
t.pendown()
t.fd(100)
t.penup()
t.setpos(0,-50)
t.lt(90)
t.pendown()
t.circle(100)
| [
"[email protected]"
] | |
62696409136a5cc3fdb711a033171a0ac2283072 | 8799cbe3a261fea3ff05af2fba7e3eade40b57f5 | /SocialMedia/chat/migrations/0003_message.py | 8d63257134147432e000872e0dc0071bfe366b5a | [] | no_license | Anoop-Suresh/Training | 83b5759db0d2113bb90731b243a1dd2d5be5992f | e6f4dd8a77fec058917dd25c424a1f3afc7df236 | refs/heads/master | 2022-11-30T08:18:21.432284 | 2019-10-13T03:48:15 | 2019-10-13T03:48:15 | 190,737,085 | 0 | 0 | null | 2022-11-22T04:17:20 | 2019-06-07T12:05:47 | Python | UTF-8 | Python | false | false | 843 | py | # Generated by Django 2.2.4 on 2019-08-19 09:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('chat', '0002_delete_message'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('timestamp', models.DateTimeField(auto_now_add=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
1a879097a3a0984ce04a4d7618b016354300f5f6 | 1d57fddd5945dd14868478bbf34dc7cbb272ee5c | /build/release/create_vortrac_src_release.py | fe6127028fb8ec5d4ebeb8a7c61ddd271b2f2349 | [
"BSD-3-Clause"
] | permissive | 1059444127/lrose-core | 21bff2d6f05804a7b1c1517df341197cbd2e14f2 | d1b62ee4b248ba98c73ec9d3cce969a7b7651679 | refs/heads/master | 2023-04-05T02:35:08.192082 | 2021-04-23T21:54:00 | 2021-04-23T21:54:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,034 | py | #!/usr/bin/env python
#===========================================================================
#
# Create a source release for vortrac
#
# Assumes that a source release has already been created for lrose-core
#
#===========================================================================
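# Typical invocation (illustrative only; adjust the release dir and tag to your setup):
#   ./create_vortrac_src_release.py --releaseDir ~/releases --tag master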
from __future__ import print_function
import os
import sys
import shutil
import subprocess
from optparse import OptionParser
import time
from datetime import datetime
from datetime import date
from datetime import timedelta
import glob
def main():
# globals
global thisScriptName
thisScriptName = os.path.basename(__file__)
global thisScriptDir
thisScriptDir = os.path.dirname(__file__)
os.chdir(thisScriptDir)
thisScriptDir = os.getcwd()
global buildDir
buildDir = os.path.join(thisScriptDir, '..')
os.chdir(buildDir)
buildDir = os.getcwd()
global homeDir
homeDir = os.environ['HOME']
global releaseInfoName
releaseInfoName = "ReleaseInfo.txt"
global package
package = "lrose-vortrac"
global corePackage
corePackage = "lrose-core"
global releaseDir
global tmpDir
global coreDir
global versionStr
global coreVersionStr
global releaseName
global tarName
global logPath
global logFp
global options
# parse the command line
usage = "usage: %prog [options]"
releaseDirDefault = os.path.join(homeDir, 'releases')
logDirDefault = '/tmp/create_src_release/logs'
parser = OptionParser(usage)
parser.add_option('--debug',
dest='debug', default=True,
action="store_true",
help='Set debugging on')
parser.add_option('--verbose',
dest='verbose', default=False,
action="store_true",
help='Set verbose debugging on')
parser.add_option('--releaseDir',
dest='releaseTopDir', default=releaseDirDefault,
help='Top-level release dir')
parser.add_option('--tag',
dest='tag', default="master",
help='Tag for checking out from git')
parser.add_option('--logDir',
dest='logDir', default=logDirDefault,
help='Logging dir')
parser.add_option('--force',
dest='force', default=False,
action="store_true",
help='force, do not request user to check it is OK to proceed')
(options, args) = parser.parse_args()
if (options.verbose):
options.debug = True
# runtime
now = time.gmtime()
nowTime = datetime(now.tm_year, now.tm_mon, now.tm_mday,
now.tm_hour, now.tm_min, now.tm_sec)
versionStr = nowTime.strftime("%Y%m%d")
# set directories
releaseDir = os.path.join(options.releaseTopDir, package)
tmpDir = os.path.join(releaseDir, "tmp")
coreDir = os.path.join(options.releaseTopDir, "lrose-core")
# compute release name and dir name
releaseName = package + "-" + versionStr + ".src"
tarName = releaseName + ".tgz"
# read the core release info
readCoreReleaseInfoFile()
# initialize logging
if (os.path.isdir(options.logDir) == False):
os.makedirs(options.logDir)
logPath = os.path.join(options.logDir, "master");
logFp = open(logPath, "w+")
if (options.debug):
print("Running %s:" % thisScriptName, file=sys.stderr)
print(" package: ", package, file=sys.stderr)
print(" releaseTopDir: ", options.releaseTopDir, file=sys.stderr)
print(" releaseDir: ", releaseDir, file=sys.stderr)
print(" logDir: ", options.logDir, file=sys.stderr)
print(" tmpDir: ", tmpDir, file=sys.stderr)
print(" force: ", options.force, file=sys.stderr)
print(" versionStr: ", versionStr, file=sys.stderr)
print(" releaseName: ", releaseName, file=sys.stderr)
print(" tarName: ", tarName, file=sys.stderr)
print(" corePackage: ", corePackage, file=sys.stderr)
print(" coreVersionStr: ", coreVersionStr, file=sys.stderr)
# save previous releases
savePrevReleases()
# create tmp dir
createTmpDir()
# get repos from git
logPath = prepareLogFile("git-checkout");
gitCheckout()
# create the release information file
createReleaseInfoFile()
# create the tar file
logPath = prepareLogFile("create-tar-file");
createTarFile()
# create the brew formula for OSX builds
#logPath = prepareLogFile("create-brew-formula");
logPath = prepareLogFile("no-logging");
createBrewFormula()
# delete the tmp dir
shutil.rmtree(tmpDir)
logFp.close()
sys.exit(0)
########################################################################
# move previous releases
def savePrevReleases():
if (os.path.isdir(releaseDir) == False):
return
os.chdir(releaseDir)
prevDirPath = os.path.join(releaseDir, 'previous_releases')
# remove if file instead of dir
if (os.path.isfile(prevDirPath)):
os.remove(prevDirPath)
# ensure dir exists
if (os.path.isdir(prevDirPath) == False):
os.makedirs(prevDirPath)
# get old releases
pattern = package + "-????????*.tgz"
oldReleases = glob.glob(pattern)
for name in oldReleases:
newName = os.path.join(prevDirPath, name)
if (options.debug):
print("saving oldRelease: ", name, file=logFp)
print("to: ", newName, file=logFp)
os.rename(name, newName)
########################################################################
# create the tmp dir
def createTmpDir():
# check if exists already
if (os.path.isdir(tmpDir)):
if (options.force == False):
print(("WARNING: you are about to remove all contents in dir: " + tmpDir))
print("===============================================")
contents = os.listdir(tmpDir)
for filename in contents:
print((" " + filename))
print("===============================================")
answer = "n"
if (sys.version_info > (3, 0)):
answer = input("WARNING: do you wish to proceed (y/n)? ")
else:
answer = raw_input("WARNING: do you wish to proceed (y/n)? ")
if (answer != "y"):
print(" aborting ....")
sys.exit(1)
# remove it
shutil.rmtree(tmpDir)
# make it clean
if (os.path.isdir(tmpDir) == False):
os.makedirs(tmpDir)
########################################################################
# check out repos from git
def gitCheckout():
os.chdir(tmpDir)
shellCmd("git clone --branch " + options.tag +
" https://github.com/mmbell/vortrac " + releaseName)
########################################################################
# write release information file
def createReleaseInfoFile():
global releaseInfoName
# go to core dir
os.chdir(releaseDir)
# open info file
releaseInfoPath = os.path.join(releaseDir, releaseInfoName)
info = open(releaseInfoPath, 'w')
# write release info
info.write("package:" + package + "\n")
info.write("version:" + versionStr + "\n")
info.write("release:" + releaseName + "\n")
# close
info.close()
# copy it up into the release dir
shellCmd("rsync -av " + releaseInfoName + " " + releaseDir)
########################################################################
# read latest release information file for the core
def readCoreReleaseInfoFile():
global coreVersionStr
coreVersionStr = "unknown"
_corePackage = "unknown"
_coreSrcRelease = "unknown"
# open info file
coreInfoPath = os.path.join(coreDir, releaseInfoName)
if (options.debug):
print("==>> reading core info file: ", coreInfoPath, file=sys.stderr)
info = open(coreInfoPath, 'r')
# read in lines
lines = info.readlines()
# close
info.close()
# decode lines
if (len(lines) < 1):
print("ERROR reading info file: ", coreInfoPath, file=sys.stderr)
print(" No contents", file=sys.stderr)
sys.exit(1)
for line in lines:
line = line.strip()
toks = line.split(":")
if (options.verbose):
print(" line: ", line, file=sys.stderr)
print(" toks: ", toks, file=sys.stderr)
if (len(toks) == 2):
if (toks[0] == "package"):
_corePackage = toks[1]
if (toks[0] == "version"):
coreVersionStr = toks[1]
if (toks[0] == "release"):
_coreSrcRelease = toks[1]
if (options.verbose):
print("==>> done reading info file: ", coreInfoPath, file=sys.stderr)
print("======>> coreVersionStr: ", coreVersionStr, file=sys.stderr)
print("======>> _corePackage: ", _corePackage, file=sys.stderr)
print("======>> _coreSrcRelease: ", _coreSrcRelease, file=sys.stderr)
########################################################################
# create the tar file
def createTarFile():
# go to tmp dir
os.chdir(tmpDir)
# create the tar file
shellCmd("tar cvfzh " + tarName + " " + releaseName)
# move the tar file into the release dir
os.rename(tarName, os.path.join(releaseDir, tarName))
########################################################################
# template for brew formula
formulaBody = """
require 'formula'
class LroseVortrac < Formula
homepage 'https://github.com/mmbell/vortrac'
url '{0}'
version '{1}'
sha256 '{2}'
depends_on 'libx11'
depends_on 'libxext'
depends_on 'qt5'
depends_on 'armadillo'
depends_on 'libzip'
depends_on 'cmake'
depends_on 'rsync'
depends_on 'lrose-core'
def install
# Build/install vortrac
ENV['LROSE_INSTALL_DIR'] = prefix
system "cmake", "-DCMAKE_INSTALL_PREFIX=#{{prefix}}", "."
system "make install"
end
def test
system "#{{bin}}/vortrac", "-h"
end
end
"""
########################################################################
# create the brew formula for OSX builds
def buildVortracFormula(tar_url, tar_name, formula_name):
os.chdir(releaseDir)
""" build a Homebrew forumula file for lrose-core """
dash = tar_name.find('-')
period = tar_name.find('.', dash)
version = tar_name[dash+1:period]
result = subprocess.check_output(("sha256sum", tar_name))
checksum = result.split()[0].decode('ascii')
formula = formulaBody.format(tar_url, version, checksum)
outf = open(formula_name, 'w')
outf.write(formula)
outf.close()
########################################################################
# create the brew formula for OSX builds
def createBrewFormula():
tarUrl = "https://github.com/NCAR/lrose-core/releases/download/" + \
corePackage + "-" + coreVersionStr + "/" + tarName
formulaName = package + ".rb"
buildVortracFormula(tarUrl, tarName, formulaName)
########################################################################
# prepare log file
def prepareLogFile(logFileName):
global logFp
logFp.close()
logPath = os.path.join(options.logDir, logFileName + ".log");
if (logPath.find('no-logging') >= 0):
logFp = sys.stderr
return logPath
print("========================= " + logFileName + " =========================", file=sys.stderr)
if (options.verbose):
print("====>> Creating log file: " + logPath + " <<==", file=sys.stderr)
logFp = open(logPath, "w+")
logFp.write("===========================================\n")
logFp.write("Log file from script: " + thisScriptName + "\n")
return logPath
########################################################################
# Run a command in a shell, wait for it to complete
def shellCmd(cmd):
print("Running cmd:", cmd, file=sys.stderr)
if (logPath.find('no-logging') >= 0):
cmdToRun = cmd
else:
print("Log file is:", logPath, file=sys.stderr)
print(" ....", file=sys.stderr)
cmdToRun = cmd + " 1>> " + logPath + " 2>&1"
try:
retcode = subprocess.check_call(cmdToRun, shell=True)
if retcode != 0:
print("Child exited with code: ", retcode, file=sys.stderr)
sys.exit(1)
else:
if (options.verbose):
print("Child returned code: ", retcode, file=sys.stderr)
except OSError as e:
print("Execution failed:", e, file=sys.stderr)
sys.exit(1)
print(" done", file=sys.stderr)
########################################################################
# Run - entry point
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
11334580d227b390aefdefb498b228ab139c24cd | b5f9f93a415a5cc0117a580c5da12804e68c141d | /scripts/motions/test/follow1.py | 2f7db1b8b9f8c5fa6b6ae576d5ae6484649a95d7 | [] | no_license | akihikoy/lfd_trick | 71f89d80abc27ffc6fbd5bc609322918a4f8264e | b7bf0189db7bcef07772db17de29302d6e8ba2bf | refs/heads/master | 2021-01-10T14:22:53.341666 | 2016-03-29T18:16:15 | 2016-03-29T18:16:15 | 50,623,958 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,335 | py | #!/usr/bin/python
from core_tool import *
def Help():
return '''Test: follow a q_traj that we experienced a failure.
Usage: test.follow1'''
def Run(t,*args):
if not t.robot.Is('Baxter'):
CPrint(4,'This test is only for Baxter.')
return
q_traj= [
[-1.0544096854806089, -0.2776862141411402, 0.28087579936236179, 1.6743394798167504, -0.56010443409106647, -1.4960295455121526, 0.57304766831757548],
[-1.0159048510309103, -0.35670424794068467, 0.33155411397969409, 1.8633858091147795, -0.47760512948645528, -1.5669787327781961, 0.42943210292684869],
[-0.96476264324992855, -0.42311836836398564, 0.39891331930441265, 2.0439506789702975, -0.37623150979886999, -1.6560476331050802, 0.28551453075495919],
[-0.92135636878025318, -0.46074409246091941, 0.45219267225159882, 2.1723939241120336, -0.29270704439420275, -1.7386809973087964, 0.17859944748797052],
[-0.91656936668207734, -0.46815040865258956, 0.44552228082284107, 2.23785467540214, -0.27927621904754196, -1.7900609544614621, 0.13306757513154097],
[-0.93221128726280056, -0.46074694640818792, 0.4028452571883796, 2.2804654161510274, -0.30677314754121038, -1.8313667753638549, 0.11292969674597057],
[-0.95616162214784528, -0.4438852029022185, 0.3410417135902617, 2.3111201017741747, -0.35492921493141205, -1.8713039748545086, 0.10151482877769019],
[-0.98726375374869657, -0.41888678102704663, 0.26351591185117751, 2.3249457989019198, -0.4190347361664789, -1.9063122558890606, 0.097811529213398987],
[-1.0242863194214729, -0.38779126582438822, 0.17653166339142254, 2.3179885163791289, -0.49143987933855837, -1.9329493608376833, 0.1015981943603622],
[-1.0656439567205895, -0.35291260947696607, 0.088150274951116991, 2.288431318010566, -0.5628497638992821, -1.9487387446436377, 0.11298602776161597],
[-1.1093022995220323, -0.31625308645432337, 0.0060286379659039453, 2.2371535204199069, -0.62494896087120411, -1.9526402401261265, 0.13156378816155415],
[-1.1529369448473732, -0.27921815728965166, -0.064511176363831246, 2.1674541197858996, -0.67262675398564087, -1.9448884257954011, 0.15577138596582199],
[-1.1941583016955246, -0.2427746643154888, -0.12106907666635845, 2.0842902079877921, -0.70442681842261712, -1.9265182356842492, 0.18293576997005481],
[-1.2306277028657173, -0.2078345031326814, -0.16355796294151093, 1.9935418251652242, -0.72147025495074257, -1.8989935296867262, 0.20976421147725308],
[-1.2601494641579252, -0.1754772449445727, -0.19278311631736536, 1.8987549292218027, -0.72543267928147837, -1.8618017119766488, 0.2344030341002333],
[-1.2820829098828739, -0.140350093340425, -0.20468057608344964, 1.7555429674580416, -0.71194974213929796, -1.7803069844426007, 0.27883255052236938],
[-1.2963234838184399, -0.091146422224025386, -0.20502880942036245, 1.5646316591788036, -0.69127656886362843, -1.6637240498207164, 0.33486503311347571],
[-1.3040405608865098, -0.027555656757953781, -0.20189327344255442, 1.3558203264485023, -0.67360914409511163, -1.5378054333056337, 0.38594834680744561],
[-1.3052683310380491, 0.032722855229927496, -0.19958386393719424, 1.1805017632013304, -0.66021822984803624, -1.4315895338502982, 0.4147167986184731],
[-1.2965664001224004, 0.051749212231033355, -0.19847320614447614, 1.1254808297736065, -0.64272551631330899, -1.386163254253725, 0.39388220133306295]]
t_traj= [1.779648830049163, 3.2565732776900154, 4.1970153081875035, 4.69874673452241, 4.94874673452241, 5.19874673452241, 5.44874673452241,
5.69874673452241, 5.94874673452241, 6.19874673452241, 6.44874673452241, 6.69874673452241, 6.94874673452241, 7.19874673452241, 7.44874673452241,
7.896284115034163, 8.642031412999936, 9.729590437636922, 11.099266713005452, 11.958968797813638]
x_traj= [t.robot.FK(q,arm=LEFT).tolist() for q in q_traj]
#['s0', 's1', 'e0', 'e1', 'w0', 'w1', 'w2']
#qvel_limits= [0.5, 0.5, 0.8, 0.8, 0.8, 0.8, 0.8] #ORIGINAL
#qvel_limits= [0.5, 0.5, 0.6, 0.6, 0.6, 0.6, 0.6]
#qvel_limits= [0.1]*7
#LimitQTrajVel(q_start=q_traj[0], q_traj=q_traj, t_traj=t_traj, qvel_limits=qvel_limits)
#print 'Modified q_traj:',q_traj
#print 'Modified t_traj:',t_traj
'''WARNING: In the following code, following the trajectory always fails.
This is due to a joint angle, e.g.
'w1' (6th element) in
[-1.0242863194214729, -0.38779126582438822, 0.17653166339142254, 2.3179885163791289, -0.49143987933855837, -1.9329493608376833, 0.1015981943603622],
exceeds the joint limits.
The trajectory was originally generated by IK with KDL, which is the reason of this issue.
NOTE: In IK in baxter_pykdl, the joint limits are not taken into account.
There was an error from /joint_trajectory_action_server:
[ERROR] [WallTime: 1452044236.093979] /joint_trajectory_action_server: Exceeded Error Threshold on left_w1: -0.350152589571
NOTE: This code has been fixed by myself@2016-01-06
With this new baxter_pykdl code, IK fails in FollowXTraj.
'''
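  #(Illustrative pre-check; the limit values below are rough placeholders -- take
  # the real ones from the robot model/URDF.)  Scanning the waypoints against the
  # joint limits would catch this failure before sending the trajectory:
  #q_min= [-1.70,-2.15,-3.05,-0.05,-3.06,-1.57,-3.06]
  #q_max= [ 1.70, 1.05, 3.05, 2.62, 3.06, 2.09, 3.06]
  #for q in q_traj:
  #  if any(qi<lo or qi>hi for qi,lo,hi in zip(q,q_min,q_max)):
  #    CPrint(4,'Waypoint out of joint limits:',q)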
t.robot.MoveToQ(q_traj[0])
CPrint(1,'Follow the trajectory. Ready?')
if not t.AskYesNo(): return
#t.robot.FollowQTraj(q_traj, t_traj, arm=LEFT, blocking=True)
t.robot.FollowXTraj(x_traj, t_traj, arm=LEFT, blocking=True)
#t.robot.MoveToQ(q_traj[0])
#for q,tm in zip(q_traj,t_traj):
#CPrint(1,'Move to next point?',q)
#if not t.AskYesNo(): break
#t.robot.MoveToQ(q)
| [
"[email protected]"
] | |
fba4aaf68de22858534fd2c92373acfe6ed79e88 | 4b0e25df6e219ed71689405f371f097b715869ee | /scripts/util/dump_yearly.py | 767b09337b14fc709769b3a1c64ccdb89e76213d | [
"MIT"
] | permissive | geogismx/dep | 4ce9be3c6a42c3ad8dd1762d7819ab19404fae3f | 303c715e70000b48c5c71df0b59e259b8c246e9c | refs/heads/master | 2020-04-16T17:50:11.805351 | 2019-01-09T16:21:59 | 2019-01-09T16:21:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,670 | py | """Dump some monthly data"""
from __future__ import print_function
import pandas as pd
from pandas.io.sql import read_sql
from pyiem.util import get_dbconn
# East Nish
DATA = """102400030603
102400030704
102400030701
102400030702
102400030601
102400030602
102400030303
102400030304
102400030406
102400030502
102400030302
102400030104
102400030301
102400030404
102400030405
102400030102
102400030501
102400030103
102400030206
102400030402
102400030101
102400030401
102400030403
102400030204
102400030205
102400030203
102400030202
102400030201
102400030707
102400030705
102400030708
102400030703
102400030706"""
DATA = """102400020402
102400020607
102400020505
102400020703
102400020606
102400020804
102400020803
102400020705
102400020802
102400020704
102400020805
102400020801
102400020303
102400020504
102400020401
102400020503
102400020702
102400020301
102400020605
102400020502
102400020603
102400020302
102400020501
102400020602
102400020105
102400020604
102400020701
102400020106
102400020209
102400020601
102400020104
102400020103
102400020208
102400020207
102400020102
102400020101
102400020203
102400020205
102400020202
102400020206
102400020204
102400020201
102400020806
102400020706"""
# Beaver Creek
DATA = """071000040901
071000040902
071000040903
071000040904
071000040905
071000040906
071000040907
071000040908
071000040909
071000040910
071000040911"""
# North Raccoon
DATA = """071000061502
071000061602
071000060605
071000061201
071000060401
071000061501
071000060802
071000060208
071000060403
071000061202
071000060602
071000060207
071000060502
071000061004
071000061402
071000061204
071000060805
071000060201
071000061001
071000060904
071000060702
071000061002
071000060203
071000060205
071000061703
071000060304
071000060601
071000060310
071000061405
071000061203
071000060804
071000060903
071000060604
071000060803
071000060505
071000061701
071000060303
071000061702
071000061301
071000061302
071000061005
071000061401
071000060308
071000061504
071000060306
071000060301
071000061003
071000061102
071000060902
071000060901
071000060603
071000060305
071000060701
071000060503
071000060101
071000060103
071000060204
071000061403
071000061404
071000060206
071000060307
071000061503
071000060309
071000060302
071000060202
071000060801
071000061406
071000060504
071000060501
071000061601
071000061505
071000060402
071000061101
071000060806
071000060102"""
HUCS = [x.strip() for x in DATA.split("\n")]
def main():
"""Go Main Go"""
pgconn = get_dbconn('idep', user='nobody')
df = read_sql("""
SELECT huc_12, extract(year from valid) as year,
sum(avg_loss) * 4.463 as loss_ton_per_acre,
sum(avg_delivery) * 4.463 as delivery_ton_per_acre,
sum(qc_precip) / 25.4 as precip_inch,
sum(avg_runoff) / 25.4 as runoff_inch
from results_by_huc12 WHERE
scenario = 0 and huc_12 in %s and valid >= '2007-01-01'
and valid < '2018-01-01' GROUP by huc_12, year
""", pgconn, params=(tuple(HUCS), ))
writer = pd.ExcelWriter(
'dep_yearly.xlsx', options={'remove_timezone': True})
df.to_excel(writer, 'Yearly Totals', index=False)
gdf = df.groupby('huc_12').mean()
gdf[['loss_ton_per_acre', 'delivery_ton_per_acre', 'precip_inch',
'runoff_inch']].to_excel(writer, 'Yearly Averages')
format1 = writer.book.add_format({'num_format': '0.00'})
worksheet = writer.sheets['Yearly Totals']
worksheet.set_column('A:A', 18)
worksheet.set_column('C:F', 20, format1)
worksheet = writer.sheets['Yearly Averages']
worksheet.set_column('A:A', 18)
worksheet.set_column('B:E', 20, format1)
writer.save()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
9236262b222aaefac7e5c01449e61de6661ebb41 | bab1fb6a6879574f010d79b0bccd3ad2681a0034 | /forumsite/forum/post/admin.py | 6f314e5be0a43b8dfa0f652393edec73842f96d2 | [
"Unlicense"
] | permissive | lyjhj/dj | ee5e2234d1e1347e5bdffeffcc7a176bd47934a2 | 867ae008a3a65fb38fb0ed95b93c616e753f3903 | refs/heads/master | 2020-03-23T18:00:15.498064 | 2018-06-13T11:17:38 | 2018-06-13T11:17:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Category, Item, Comment
admin.site.register(Category)
class CommentInline(admin.TabularInline):
model = Comment
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
list_display = ['title', 'publish', 'created', 'cat']
list_filter = ['publish', 'cat']
    search_fields = ['title', 'body']
inlines = [ CommentInline, ]
| [
"[email protected]"
] | |
ed97e7bb8c4a7062ac88f90ec47428281e6b77b5 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/6666dc4eec284bd286b22d2f63110743.py | 2ead89f1a06dfe3793ffe43dd3eb7edce4e3206a | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 432 | py | #
# Skeleton file for the Python "Bob" exercise.
#
def is_question(what):
return what[-1] == '?'
def is_yelling(what):
return what.isupper()
def is_empty(what):
return not what.strip()
def hey(what):
if is_yelling(what):
return 'Whoa, chill out!'
elif is_empty(what):
return 'Fine. Be that way!'
elif is_question(what):
return 'Sure.'
else:
return 'Whatever.'
| [
"[email protected]"
] | |
a4f3850908d168eab535b69ca348c26c249e1c88 | 0cc7fbe68074113b3db7a6b42a303dcd970da326 | /exercises/reinforcement_learning/monte_carlo_policy_iteration.py | 6187b706ca8664251e6a496def7406e0349a1dde | [] | no_license | jay-woo/comprobo2014 | a7c32a37b56933635ece69821b00f0f93df83d15 | 4e8b77bb5a9926b3b7735020fc24f294f61533b8 | refs/heads/master | 2020-04-01T20:03:23.113694 | 2015-09-01T00:41:41 | 2015-09-01T00:41:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,893 | py | #!/usr/bin/env python
"""
This Python script demonstrates the basics of the Monte-Carlo Policy Iteration algorithm with exploring starts
"""
import numpy as np
import pdb
from copy import copy
def evaluate_policy_montecarlo(policy,p,r,gamma,n):
""" computes v^policy and q^policy using monte-carlo evaluation
"""
# compute the q function as well
q = np.zeros((p.shape[1],p.shape[0]))
v = np.zeros((p.shape[1],1))
for i in range(q.shape[0]):
for j in range(q.shape[1]):
returns = []
for trial in range(n):
x = i
u = j
rsum = r[x,u]
probs = p[u,x,:]
            # 100 is an arbitrary threshold where gamma**100 is sufficiently low (with gamma=0.9 as used below, 0.9**100 is about 2.7e-5)
for t in range(1,100):
probs = p[u,x,:]
x = np.random.choice(np.arange(p.shape[1]),p=probs)
u = policy[x]
rsum += r[x,u]*gamma**t
returns.append(rsum)
q[i,j] = sum(returns)/n
for i in range(q.shape[0]):
v[i] = q[i,policy[i]]
return v,q
def improve_policy(policy,q):
newpolicy = copy(policy)
for i in range(len(policy)):
newpolicy[i] = np.argmax(q[i,:])
return newpolicy
# encodes p(x_{t+1} | x_t, u_t), the first dimension is u_t, next is x_t, and the third is x_{t+1}
p = np.array([[[0.6, .25, .15],
[0.0, 1.0, 0.0],
[0.3, 0.0, 0.7]],
[[0.1, .8, .1],
[0.0, 0.0, 1.0],
[0.0, 0.5, 0.5]]])
# encodes r(x_t, u_t), the first dimension is x_t, and the second is u_t
r = np.array([[0.0, 0.0],
[0.0, 0.0],
[1.0, 1.0]])
# the discount factor for the MDP
gamma = 0.9
# initialize the policy (at first always execute action 0)
policy = [0, 0, 0]
print "policy is ", policy
converged = False
while not(converged):
# evaluate the policy
v,q = evaluate_policy_montecarlo(policy,p,r,gamma,100)
print "value function is", v
oldpolicy = policy
# improve the policy
policy = improve_policy(policy,q)
converged = oldpolicy == policy
print "new policy is ", policy | [
"[email protected]"
] | |
fb2755119d89487927d6f007973870fb4c5e228f | 83316dd8a01070711fe8c42cd38d245da9a4711e | /testmethodology/results/ResultUtils.py | 836fe4807b83806f812225e9b4955097b55d74b9 | [] | no_license | CmWork/STAKCommands | fa46d561d0a85ac49c14f1b1fc6c014d2e0955bc | 8b3fb68912116f7973fa9b3677d4e3d43c92f194 | refs/heads/master | 2020-05-02T00:57:20.940300 | 2015-07-10T04:32:37 | 2015-07-10T04:32:37 | 38,860,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,669 | py | import os
import json
from StcIntPythonPL import *
from spirent.methodology.results.Status import Status
from spirent.methodology.results.ResultEnum import (
EnumVerdict,
EnumDataClass,
EnumDataFormat
)
import spirent.methodology.results.LogUtils as logger
from spirent.methodology.results.ResultConst import ResultConst
from spirent.methodology.results.ProviderConst import ProviderConst as pc
import copy
import datetime
import time
def summarize_status(obj):
verdict = EnumVerdict.none
verdict_text = ResultConst.NONE
for result in obj._data:
if Status.get_dict_name() in result:
if Status.get_apply_verdict_dict_name() in result[Status.get_dict_name()]:
if result[Status.get_dict_name()][Status.get_apply_verdict_dict_name()] is False:
continue
new_verdict = result[Status.get_dict_name()][Status.get_verdict_dict_name()]
if EnumVerdict.do_override_verdict(verdict, new_verdict):
verdict = new_verdict
verdict_text = \
result[Status.get_dict_name()][Status.get_verdict_text_dict_name()]
obj._status.verdict = verdict
if verdict == EnumVerdict.passed:
obj._status.verdict_text = ResultConst.TEST_PASS_VERDICT_TEXT
else:
obj._status.verdict_text = verdict_text
def generate_report_file(report_name, data):
filename = os.path.join(CTestResultSettingExt.GetResultDbBaseDirectory(), report_name)
logger.info("Saving file:" + filename)
if not os.path.exists(os.path.dirname(filename)):
os.makedirs(os.path.dirname(filename))
f = open(filename, "w")
f.write(json.dumps(data, separators=(',', ':'), sort_keys=False))
f.close()
CFileManager.AddFile(filename, 'RESULT')
return filename
def wrap_data_as_single_group(data, group_tag=ResultConst.ALL_GROUPS):
groupdata = {}
groupdata[ResultConst.TAG] = group_tag
if not isinstance(data, list):
groupdata[ResultConst.CHILDREN] = []
groupdata[ResultConst.CHILDREN].append(data)
else:
groupdata[ResultConst.CHILDREN] = data
return groupdata
def report_group_comparator(data1, data2):
value1 = CMeta.GetEnumerationValue('spirent.methodology.ResultBaseCommand',
'ReportGroup',
str(data1[pc.INFO][pc.REPORT_GROUP]))
value2 = CMeta.GetEnumerationValue('spirent.methodology.ResultBaseCommand',
'ReportGroup',
str(data2[pc.INFO][pc.REPORT_GROUP]))
return value1 - value2
def group_data_using_report_group(data):
data.sort(report_group_comparator)
groupdata = {}
groupdata[ResultConst.TAG] = ResultConst.ALL_GROUPS
groupdata[ResultConst.CHILDREN] = []
result_group = {}
result_group[pc.CLASS] = EnumDataClass.result_group
result_group[pc.DATA_FORMAT] = EnumDataFormat.group
mydata = {}
mydata[ResultConst.TAG] = 'ResultGroup'
mydata[ResultConst.CHILDREN] = []
result_group[pc.DATA] = mydata
cgdata = copy.deepcopy(result_group)
cdata = cgdata[pc.DATA]
cgroup = pc.DEFAULT_REPORT_GROUP
for pdata in data:
if cdata[ResultConst.CHILDREN]:
if pdata[pc.INFO][pc.REPORT_GROUP] == cgroup:
cdata[ResultConst.CHILDREN].append(pdata)
continue
else:
groupdata[ResultConst.CHILDREN].append(cgdata)
cgdata = copy.deepcopy(result_group)
cdata = cgdata[pc.DATA]
cdata[ResultConst.CHILDREN].append(pdata)
cgroup = pdata[pc.INFO][pc.REPORT_GROUP]
cdata[ResultConst.TAG] = "Report Group " + cgroup
if cdata[ResultConst.CHILDREN]:
groupdata[ResultConst.CHILDREN].append(cgdata)
return groupdata
def validate_report_group(stringValue):
try:
CMeta.GetEnumerationValue('spirent.methodology.ResultBaseCommand',
'ReportGroup',
str(stringValue))
return stringValue
except:
return pc.DEFAULT_REPORT_GROUP
def insert_report_group_if_not_defined(dict_data):
if not (pc.INFO in dict_data):
dict_data[pc.INFO] = {}
if not (pc.REPORT_GROUP in dict_data[pc.INFO]):
dict_data[pc.INFO][pc.REPORT_GROUP] = pc.DEFAULT_REPORT_GROUP
else:
dict_data[pc.INFO][pc.REPORT_GROUP] = \
validate_report_group(dict_data[pc.INFO][pc.REPORT_GROUP])
return dict_data
def get_current_time_string():
return datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S') | [
"[email protected]"
] | |
f2d2a1d1453a1e73523857c1bcf4515f83691849 | 9f5fcff2513f2d78f27e5313698dcc47fce1e754 | /Experiment/Parallel_EA_NAS/graphnas/gnn_model_manager.py | 7cbd75a812a6420f3992d3d1dea04e4602bf9288 | [
"Apache-2.0"
] | permissive | ncucjm/notebook | c2495f790e9fc2ca55c1c29a8eaa2dc1bfe7463f | 7271a0d1b10cdd6298e223c7ff150d4df031aa76 | refs/heads/master | 2023-07-20T05:55:48.946687 | 2021-01-27T09:12:19 | 2021-01-27T09:12:19 | 202,633,012 | 0 | 0 | null | 2023-07-06T21:28:29 | 2019-08-16T00:58:45 | Jupyter Notebook | UTF-8 | Python | false | false | 8,422 | py | import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from dgl import DGLGraph
from dgl.data import load_data
from graphnas.gnn import GraphNet
from graphnas.utils.model_utils import EarlyStop, TopAverage, process_action
def load(args, save_file=".npy"):
save_file = args.dataset + save_file
if os.path.exists(save_file):
return np.load(save_file).tolist()
else:
datas = load_data(args)
np.save(save_file, datas)
return datas
def evaluate(output, labels, mask):
_, indices = torch.max(output, dim=1)
# print('indices dim: ', indices.dim())
# print('labels dim: ', labels.dim())
correct = torch.sum(indices[mask] == labels[mask])
return correct.item() * 1.0 / mask.sum().item()
# manages the training process of a GNN on citation datasets
class CitationGNNManager(object):
def __init__(self, args):
self.args = args
if hasattr(args, 'dataset') and args.dataset in ["cora", "citeseer", "pubmed"]:
self.data = load(args)
self.args.in_feats = self.in_feats = self.data.features.shape[1]
self.args.num_class = self.n_classes = self.data.num_labels
self.early_stop_manager = EarlyStop(10)
self.reward_manager = TopAverage(10)
print('the experiment config:', '\n', args)
self.args = args
self.drop_out = args.in_drop
self.multi_label = args.multi_label
self.lr = args.lr
self.weight_decay = args.weight_decay
self.retrain_epochs = args.retrain_epochs
self.loss_fn = torch.nn.BCELoss() # binary cross entropy loss
self.epochs = args.epochs
self.train_graph_index = 0
self.train_set_length = 10
self.param_file = args.param_file
self.shared_params = None
self.loss_fn = torch.nn.functional.nll_loss
def load_param(self):
# don't share param
pass
def save_param(self, model, update_all=False):
# don't share param
pass
# train from scratch
def evaluate(self, actions=None, format="two"):
actions = process_action(actions, format, self.args)
print("train action:", actions)
# create model
model = self.build_gnn(actions)
if self.args.cuda:
model.cuda()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
try:
model, val_acc, test_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs,
cuda=self.args.cuda, return_best=True,
half_stop_score=max(self.reward_manager.get_top_average() * 0.7,
0.4))
except RuntimeError as e:
if "cuda" in str(e) or "CUDA" in str(e):
print(e)
val_acc = 0
test_acc = 0
else:
raise e
return val_acc, test_acc
# train from scratch
def train(self, actions=None, format="two"):
origin_action = actions
actions = process_action(actions, format, self.args)
print("train gnn structures:", actions)
# create model
model = self.build_gnn(actions)
try:
if self.args.cuda:
model.cuda()
# use optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=self.args.lr, weight_decay=self.args.weight_decay)
model, val_acc = self.run_model(model, optimizer, self.loss_fn, self.data, self.epochs, cuda=self.args.cuda,
half_stop_score=max(self.reward_manager.get_top_average() * 0.7, 0.4)
# , show_info=True
)
except RuntimeError as e:
if "cuda" in str(e) or "CUDA" in str(e):
print(e)
val_acc = 0
else:
raise e
reward = self.reward_manager.get_reward(val_acc)
        # gnn model, reward, val_acc
# self.record_action_info(origin_action, reward, val_acc)
return reward, val_acc
def record_action_info(self, origin_action, reward, val_acc):
with open(self.args.dataset + "_" + self.args.search_mode + self.args.submanager_log_file, "a") as file:
file.write(str(origin_action))
file.write(";")
file.write(str(val_acc))
file.write("\n")
def build_gnn(self, actions):
model = GraphNet(actions, self.in_feats, self.n_classes, drop_out=self.args.in_drop, multi_label=False,
batch_normal=False)
return model
def retrain(self, actions, format="two"):
return self.train(actions, format)
def test_with_param(self, actions=None, format="two", with_retrain=False):
return self.train(actions, format)
@staticmethod
def run_model(model, optimizer, loss_fn, data, epochs, early_stop=5, tmp_model_file="geo_citation.pkl",
half_stop_score=0, return_best=False, cuda=True, need_early_stop=False, show_info=False):
        print('run_model of CitationGNNManager was called')
dur = []
begin_time = time.time()
best_performance = 0
min_val_loss = float("inf")
min_train_loss = float("inf")
model_val_acc = 0
features, g, labels, mask, val_mask, test_mask, n_edges = CitationGNNManager.prepare_data(data, cuda)
for epoch in range(1, epochs + 1):
model.train()
t0 = time.time()
# forward
logits = model(features, g)
logits = F.log_softmax(logits, 1)
loss = loss_fn(logits[mask], labels[mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
train_loss = loss.item()
# evaluate
model.eval()
logits = model(features, g)
logits = F.log_softmax(logits, 1)
train_acc = evaluate(logits, labels, mask)
dur.append(time.time() - t0)
val_loss = float(loss_fn(logits[val_mask], labels[val_mask]))
val_acc = evaluate(logits, labels, val_mask)
test_acc = evaluate(logits, labels, test_mask)
if val_loss < min_val_loss: # and train_loss < min_train_loss
min_val_loss = val_loss
min_train_loss = train_loss
model_val_acc = val_acc
if test_acc > best_performance:
best_performance = test_acc
if show_info:
print(
"Epoch {:05d} | Loss {:.4f} | Time(s) {:.4f} | acc {:.4f} | val_acc {:.4f} | test_acc {:.4f}".format(
epoch, loss.item(), np.mean(dur), train_acc, val_acc, test_acc))
end_time = time.time()
print("Each Epoch Cost Time: %f " % ((end_time - begin_time) / epoch))
print(f"val_score:{model_val_acc},test_score:{best_performance}")
if return_best:
return model, model_val_acc, best_performance
else:
return model, model_val_acc
@staticmethod
def prepare_data(data, cuda=True):
features = torch.FloatTensor(data.features)
print('features: ', features)
labels = torch.LongTensor(data.labels)
print('labels: ', labels)
mask = torch.ByteTensor(data.train_mask)
print('mask: ', mask)
test_mask = torch.ByteTensor(data.test_mask)
print('test_mask: ', test_mask)
val_mask = torch.ByteTensor(data.val_mask)
print('val_mask: ', val_mask)
n_edges = data.graph.number_of_edges()
print('n_edges: ', n_edges)
# create DGL graph
g = DGLGraph(data.graph)
# add self loop
g.add_edges(g.nodes(), g.nodes())
degs = g.in_degrees().float()
norm = torch.pow(degs, -0.5)
norm[torch.isinf(norm)] = 0
if cuda:
features = features.cuda()
labels = labels.cuda()
norm = norm.cuda()
g.ndata['norm'] = norm.unsqueeze(1)
return features, g, labels, mask, val_mask, test_mask, n_edges
| [
"[email protected]"
] | |
564452fb10213587922272db7d1083be27fc5b61 | 28ff818fcf120f52bde8d26dbaa654e951595009 | /APUNTES/PYTHON/EJEMPLOS_FORMACION/djangoCMSproject/venv/Lib/site-packages/filer/models/foldermodels.py | 6ba2e85233d96d43db46270b8b2fac84b4f5fe5a | [] | no_license | pcmaestro/my_repository | 9c87382b2c30e5d0985a08ddfa172995de80a45a | 13d47e67a49b5996654a5386c024e61c2c753e6b | refs/heads/master | 2022-07-13T20:38:14.614763 | 2020-08-14T18:58:44 | 2020-08-14T18:58:44 | 234,505,688 | 2 | 0 | null | 2022-06-21T03:58:12 | 2020-01-17T08:32:56 | Python | UTF-8 | Python | false | false | 12,171 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.conf import settings
from django.contrib.auth import models as auth_models
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.urls import reverse
from django.utils.http import urlquote
from django.utils.translation import ugettext_lazy as _
import mptt
from six import python_2_unicode_compatible
from .. import settings as filer_settings
from . import mixins
class FolderManager(models.Manager):
def with_bad_metadata(self):
return self.get_query_set().filter(has_all_mandatory_data=False)
class FolderPermissionManager(models.Manager):
"""
    These methods are called by introspection from "has_generic_permission" on
the folder model.
"""
def get_read_id_list(self, user):
"""
        Give a list of the Folders where the user has read rights, or the string
"All" if the user has all rights.
"""
return self.__get_id_list(user, "can_read")
def get_edit_id_list(self, user):
return self.__get_id_list(user, "can_edit")
def get_add_children_id_list(self, user):
return self.__get_id_list(user, "can_add_children")
def __get_id_list(self, user, attr):
if user.is_superuser or not filer_settings.FILER_ENABLE_PERMISSIONS:
return 'All'
allow_list = set()
deny_list = set()
group_ids = user.groups.all().values_list('id', flat=True)
q = Q(user=user) | Q(group__in=group_ids) | Q(everybody=True)
perms = self.filter(q).order_by('folder__tree_id', 'folder__level',
'folder__lft')
for perm in perms:
p = getattr(perm, attr)
if p is None:
# Not allow nor deny, we continue with the next permission
continue
if not perm.folder:
assert perm.type == FolderPermission.ALL
if p == FolderPermission.ALLOW:
allow_list.update(Folder.objects.all().values_list('id', flat=True))
else:
deny_list.update(Folder.objects.all().values_list('id', flat=True))
continue
folder_id = perm.folder.id
if p == FolderPermission.ALLOW:
allow_list.add(folder_id)
else:
deny_list.add(folder_id)
if perm.type == FolderPermission.CHILDREN:
if p == FolderPermission.ALLOW:
allow_list.update(perm.folder.get_descendants().values_list('id', flat=True))
else:
deny_list.update(perm.folder.get_descendants().values_list('id', flat=True))
# Deny has precedence over allow
return allow_list - deny_list
@python_2_unicode_compatible
class Folder(models.Model, mixins.IconsMixin):
"""
Represents a Folder that things (files) can be put into. Folders are *NOT*
mirrored in the Filesystem and can have any unicode chars as their name.
Other models may attach to a folder with a ForeignKey. If the related name
ends with "_files" they will automatically be listed in the
folder.files list along with all the other models that link to the folder
in this way. Make sure the linked models obey the AbstractFile interface
(Duck Type).
"""
file_type = 'Folder'
is_root = False
can_have_subfolders = True
_icon = 'plainfolder'
# explicitly define MPTT fields which would otherwise change
# and create a migration, depending on django-mptt version
# (see: https://github.com/django-mptt/django-mptt/pull/578)
level = models.PositiveIntegerField(editable=False)
lft = models.PositiveIntegerField(editable=False)
rght = models.PositiveIntegerField(editable=False)
parent = models.ForeignKey(
'self',
verbose_name=('parent'),
null=True,
blank=True,
related_name='children',
on_delete=models.CASCADE,
)
name = models.CharField(_('name'), max_length=255)
owner = models.ForeignKey(
getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
verbose_name=_('owner'),
related_name='filer_owned_folders',
on_delete=models.SET_NULL,
null=True,
blank=True,
)
uploaded_at = models.DateTimeField(_('uploaded at'), auto_now_add=True)
created_at = models.DateTimeField(_('created at'), auto_now_add=True)
modified_at = models.DateTimeField(_('modified at'), auto_now=True)
objects = FolderManager()
@property
def file_count(self):
if not hasattr(self, '_file_count_cache'):
self._file_count_cache = self.files.count()
return self._file_count_cache
@property
def children_count(self):
if not hasattr(self, '_children_count_cache'):
self._children_count_cache = self.children.count()
return self._children_count_cache
@property
def item_count(self):
return self.file_count + self.children_count
@property
def files(self):
return self.all_files.all()
@property
def logical_path(self):
"""
Gets logical path of the folder in the tree structure.
Used to generate breadcrumbs
"""
folder_path = []
if self.parent:
folder_path.extend(self.parent.get_ancestors())
folder_path.append(self.parent)
return folder_path
@property
def pretty_logical_path(self):
return "/%s" % "/".join([f.name for f in self.logical_path + [self]])
@property
def quoted_logical_path(self):
return urlquote(self.pretty_logical_path)
def has_edit_permission(self, request):
return self.has_generic_permission(request, 'edit')
def has_read_permission(self, request):
return self.has_generic_permission(request, 'read')
def has_add_children_permission(self, request):
return self.has_generic_permission(request, 'add_children')
def has_generic_permission(self, request, permission_type):
"""
Return true if the current user has permission on this
folder. Return the string 'ALL' if the user has all rights.
"""
user = request.user
if not user.is_authenticated:
return False
elif user.is_superuser:
return True
elif user == self.owner:
return True
else:
if not hasattr(self, "permission_cache") or\
permission_type not in self.permission_cache or \
request.user.pk != self.permission_cache['user'].pk:
if not hasattr(self, "permission_cache") or request.user.pk != self.permission_cache['user'].pk:
self.permission_cache = {
'user': request.user,
}
# This calls methods on the manager i.e. get_read_id_list()
func = getattr(FolderPermission.objects,
"get_%s_id_list" % permission_type)
permission = func(user)
if permission == "All":
self.permission_cache[permission_type] = True
self.permission_cache['read'] = True
self.permission_cache['edit'] = True
self.permission_cache['add_children'] = True
else:
self.permission_cache[permission_type] = self.id in permission
return self.permission_cache[permission_type]
def get_admin_change_url(self):
return reverse('admin:filer_folder_change', args=(self.id,))
def get_admin_directory_listing_url_path(self):
return reverse('admin:filer-directory_listing', args=(self.id,))
def get_admin_delete_url(self):
try:
# Django <=1.6
model_name = self._meta.module_name
except AttributeError:
# Django >1.6
model_name = self._meta.model_name
return reverse(
'admin:{0}_{1}_delete'.format(self._meta.app_label, model_name,),
args=(self.pk,))
def __str__(self):
return "%s" % (self.name,)
def contains_folder(self, folder_name):
try:
self.children.get(name=folder_name)
return True
except Folder.DoesNotExist:
return False
class Meta(object):
# see: https://github.com/django-mptt/django-mptt/pull/577
index_together = (('tree_id', 'lft'),)
unique_together = (('parent', 'name'),)
ordering = ('name',)
permissions = (("can_use_directory_listing",
"Can use directory listing"),)
app_label = 'filer'
verbose_name = _("Folder")
verbose_name_plural = _("Folders")
# MPTT registration
try:
mptt.register(Folder)
except mptt.AlreadyRegistered:
pass
@python_2_unicode_compatible
class FolderPermission(models.Model):
ALL = 0
THIS = 1
CHILDREN = 2
ALLOW = 1
DENY = 0
TYPES = (
(ALL, _('all items')),
(THIS, _('this item only')),
(CHILDREN, _('this item and all children')),
)
PERMISIONS = (
(ALLOW, _('allow')),
(DENY, _('deny')),
)
folder = models.ForeignKey(
Folder,
verbose_name=('folder'),
null=True,
blank=True,
on_delete=models.CASCADE,
)
type = models.SmallIntegerField(_('type'), choices=TYPES, default=ALL)
user = models.ForeignKey(getattr(settings, 'AUTH_USER_MODEL', 'auth.User'),
related_name="filer_folder_permissions", on_delete=models.SET_NULL,
verbose_name=_("user"), blank=True, null=True)
group = models.ForeignKey(
auth_models.Group,
related_name="filer_folder_permissions",
verbose_name=_("group"),
blank=True,
null=True,
on_delete=models.CASCADE,
)
everybody = models.BooleanField(_("everybody"), default=False)
can_edit = models.SmallIntegerField(_("can edit"), choices=PERMISIONS, blank=True, null=True, default=None)
can_read = models.SmallIntegerField(_("can read"), choices=PERMISIONS, blank=True, null=True, default=None)
can_add_children = models.SmallIntegerField(_("can add children"), choices=PERMISIONS, blank=True, null=True, default=None)
objects = FolderPermissionManager()
def __str__(self):
if self.folder:
name = '%s' % self.folder
else:
name = 'All Folders'
ug = []
if self.everybody:
ug.append('Everybody')
else:
if self.group:
ug.append("Group: %s" % self.group)
if self.user:
ug.append("User: %s" % self.user)
usergroup = " ".join(ug)
perms = []
for s in ['can_edit', 'can_read', 'can_add_children']:
perm = getattr(self, s)
if perm == self.ALLOW:
perms.append(s)
elif perm == self.DENY:
perms.append('!%s' % s)
perms = ', '.join(perms)
return "Folder: '%s'->%s [%s] [%s]" % (
name, self.get_type_display(),
perms, usergroup)
def clean(self):
if self.type == self.ALL and self.folder:
raise ValidationError('Folder cannot be selected with type "all items".')
if self.type != self.ALL and not self.folder:
raise ValidationError('Folder has to be selected when type is not "all items".')
if self.everybody and (self.user or self.group):
raise ValidationError('User or group cannot be selected together with "everybody".')
if not self.user and not self.group and not self.everybody:
raise ValidationError('At least one of user, group, or "everybody" has to be selected.')
class Meta(object):
verbose_name = _('folder permission')
verbose_name_plural = _('folder permissions')
app_label = 'filer'
| [
"[email protected]"
] | |
e40bad0cc4a7daf31b43b5798a29c426db6e2f2a | 65dce36be9eb2078def7434455bdb41e4fc37394 | /234 Palindrome Linked List.py | 57debdf93004c385c901d8d4fc1ac7a1101d855a | [] | no_license | EvianTan/Lintcode-Leetcode | 9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a | d12dd31e98c2bf24acc20c5634adfa950e68bd97 | refs/heads/master | 2021-01-22T08:13:55.758825 | 2017-10-20T21:46:23 | 2017-10-20T21:46:23 | 92,607,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | '''
Given a singly linked list, determine if it is a palindrome.
Follow up:
Could you do it in O(n) time and O(1) space?
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def isPalindrome(self, head):
"""
:type head: ListNode
:rtype: bool
"""
nodes = []
while head:
nodes.append(head.val)
head = head.next
return nodes == nodes[::-1] | [
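    # Hedged sketch (added for illustration, not from the original repo): one way to
    # answer the O(1)-space follow-up from the header docstring, by reversing the
    # second half in place with fast/slow pointers; the method name is made up.
    def isPalindromeConstantSpace(self, head):
        # find the middle of the list
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        # reverse the second half in place
        prev = None
        while slow:
            slow.next, prev, slow = prev, slow, slow.next
        # compare the first half with the reversed second half
        left, right = head, prev
        while right:
            if left.val != right.val:
                return False
            left = left.next
            right = right.next
        return True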
"[email protected]"
] | |
8d7b3d53d319729a792f4c4c9540b11d79a2188e | 1646b3fe9000c3109695e99b4bb75679577906ff | /187.RepeatedDNASequences.py | 4c2cf6b3b75206c0e73051291ec5b5a969f38cf3 | [] | no_license | yao9208/lc | 5ecf6720886beb951c9a70433f53a0ec0bcb74dc | 024c1b5c98a9e85706e110fc2be8dcebf0f460c3 | refs/heads/master | 2020-04-03T20:55:40.199637 | 2017-02-10T08:30:46 | 2017-02-10T08:30:46 | 56,478,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from sets import Set
class Solution(object):
def findRepeatedDnaSequences(self, s):
"""
:type s: str
:rtype: List[str]
"""
result = []
dic = Set()
resultSet = Set()
for i in range(len(s)-9):
sub = s[i:i+10]
#key = self.transform(sub)
if sub in dic:
resultSet.add(sub)
else:
dic.add(sub)
return list(resultSet)
def transform(self, s):
dic = {'A':0, 'T':1, 'C':2, 'G':3}
result = 0
for ch in s:
            result = (result << 2) + dic[ch]
return result
| [
"[email protected]"
] | |
cc0b564573152e35e57785774f3f8f3b7ae477e6 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /pdMwiMpYkJkn8WY83_4.py | f98843713468da39e6fdfb127999e93b793e8ef5 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | """
Write a function that **recursively** determines if a string is a palindrome.
### Examples
is_palindrome("abcba") ➞ True
is_palindrome("b") ➞ True
is_palindrome("") ➞ True
is_palindrome("ad") ➞ False
### Notes
An empty string counts as a palindrome.
"""
def is_palindrome(w):
if len(w) <= 1:
return True
elif w[0] == w[-1]:
return is_palindrome(w[1:-1])
else:
return False
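# Quick usage check (added for illustration), exercising the examples given in the
# docstring above.
if __name__ == "__main__":
    assert is_palindrome("abcba") is True
    assert is_palindrome("b") is True
    assert is_palindrome("") is True
    assert is_palindrome("ad") is False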
| [
"[email protected]"
] | |
0189ebc033ef3f0dba5aa432cc867ce49bd47944 | 3da15577cf3faeeab75cf48f6230372e22e1ae39 | /shop/api/permissions.py | 08b7c9799ae7bdca64b6519bd140092e074eec4e | [] | no_license | deepdik/cityapl | 991428e52f0bd33ba48bf42391244661512edd17 | 9e62ce2924018b0ca5e4d2e884279128605d5e0c | refs/heads/master | 2020-04-04T22:37:42.507940 | 2018-11-06T06:02:05 | 2018-11-06T06:02:05 | 156,329,922 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 584 | py | from rest_framework.permissions import BasePermission,SAFE_METHODS
class IsOwnerOrReadOnly(BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in SAFE_METHODS:
return True
return obj.user == request.user
class IsUser(BasePermission):
def has_permission(self, request, view):
print("hello")
if 'HTTP_USER_AGENT' in request.META:
# print (request.META[''])
if 'Mozilla' in request.META['HTTP_USER_AGENT']:
if(request.META.get('HTTP_REFERER')):
return True
return False | [
"[email protected]"
] | |
270980529a30f03e325df9ebbce376402f7393dd | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_fop.py | 6e20e2da412efa43b68bdc5ddc1b75f6b64b011f | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 388 | py |
#class header
class _FOP():
def __init__(self,):
self.name = "FOP"
self.definitions = [u'(especially in the past) a man who is extremely interested in his appearance and who wears very decorative clothes']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
a174d3dad7d7bfec074ca8e6f17544d6a1a70f26 | 7278b31ebd6362bebf6986c2f3eca89d87201eb2 | /exp/viroscopy/model/HIVEpidemicModel.py | f629736eb99ec5dbe3f39dfa031170162bfea751 | [] | no_license | malcolmreynolds/APGL | c19827b1b834d3491d98a751c91838177aedc29e | 1703510cbb51ec6df0efe1de850cd48ef7004b00 | refs/heads/master | 2020-12-25T05:52:45.826947 | 2013-03-26T12:30:00 | 2013-03-26T12:30:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,291 | py | import logging
import numpy
from apgl.graph import *
from apgl.util import *
from exp.viroscopy.model.HIVGraph import HIVGraph
from exp.viroscopy.model.HIVVertices import HIVVertices
class HIVEpidemicModel():
def __init__(self, graph, rates, T=100.0, T0=0.0, metrics=None):
"""
        This class models an epidemic occurring via sexual contact. We create an
epidemic model with a HIVGraph and a class which models the rate of
certain events in the model.
:param graph: Initial HIVGraph to use for modelling
:param rates: A class modelling the event rates in the model
:param T0: This is the starting time of the simulation
:param metrics: A graph metrics object
"""
Parameter.checkClass(graph, HIVGraph)
self.graph = graph
self.graph.endEventTime = T0
self.rates = rates
self.setT(T)
self.breakFunc = None
self.standardiseResults = True
self.T0 = T0
self.setRecordStep((self.T-self.T0)/10.0)
self.metrics = metrics
def setT(self, T):
"""
Set the maximum time of the simulation.
"""
Parameter.checkFloat(T, 0.0, float('inf'))
self.T = T
def setT0(self, T0):
"""
Set the start time of the simulation.
"""
Parameter.checkFloat(T0, 0.0, float('inf'))
self.T0 = T0
def setRecordStep(self, recordStep):
"""
        Set the time interval in order to record statistics over the model.
"""
if abs((self.T-self.T0) % recordStep) > 10**-6:
print((self.T-self.T0) % recordStep)
raise ValueError("Record Step must divide exactly into T-T0")
self.recordStep = recordStep
def setParams(self, theta):
"""
        This is used to set the parameters of the initial state of this model
in conjunction with ABC model selection.
:param theta: An array containing parameter values
:type theta: `numpy.ndarray`
"""
if theta.shape[0] != 12:
raise ValueError("Theta should be of length 12")
self.graph.setRandomInfected(int(theta[0]), theta[1])
self.rates.setAlpha(theta[2])
self.rates.setNewContactChance(theta[3])
self.rates.setRandDetectRate(theta[4])
self.rates.setCtRatePerPerson(theta[5])
self.rates.setMaxDetects(int(theta[6]))
self.rates.setHeteroContactRate(theta[7])
self.rates.setBiContactRate(theta[8])
self.rates.setWomanManInfectProb(theta[9])
self.rates.setManWomanInfectProb(theta[10])
self.rates.setManBiInfectProb(theta[11])
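        # Illustrative summary (not from the original source; meanings read off the
        # setters above, nothing here is executed): a length-12 theta is laid out as
        #   theta[0]  number of initially infected vertices (int)
        #   theta[1]  second argument passed to graph.setRandomInfected
        #   theta[2]  alpha
        #   theta[3]  new contact chance
        #   theta[4]  random detection rate
        #   theta[5]  contact tracing rate per person
        #   theta[6]  maximum number of detects (int)
        #   theta[7]  heterosexual contact rate
        #   theta[8]  bisexual contact rate
        #   theta[9]  woman-to-man infection probability
        #   theta[10] man-to-woman infection probability
        #   theta[11] man-to-bi-man infection probability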
#@profile
def simulate(self, verboseOut=False):
"""
        Simulate epidemic propagation until there are no more infectives or
time T is reached.
"""
if self.graph.getNumEdges()!=0:
raise ValueError("Must start simulation with empty (no edges) graph: " + str(self.graph.getNumEdges()))
susceptibleSet = self.graph.getSusceptibleSet()
infectedSet = self.graph.getInfectedSet()
removedSet = self.graph.getRemovedSet()
#This is the set of people who are having sexual contact
contactSet = susceptibleSet.union(infectedSet)
infectedList = list(infectedSet)
removedList = list(removedSet)
contactList = list(contactSet)
t = self.T0
times = [t]
#A list of lists of infected indices
infectedIndices = [infectedList]
removedIndices = [removedList]
nextStep = t + self.recordStep
numContacts = 0
logging.debug("Starting simulation at time " + str(t) + " with graph of size " + str(self.graph.size))
#Now, start the simulation
while t < self.T and len(infectedSet) != 0:
contactInds, contactRates = self.rates.contactRates(infectedList, contactList, t)
contactTracingRates = self.rates.contactTracingRates(infectedList, removedSet, t)
randomDetectRates = self.rates.randomDetectionRates(infectedList, t)
#assert contactRates.shape == (len(infectedList), len(contactList))
assert (contactTracingRates == numpy.abs(contactTracingRates)).all()
assert (randomDetectRates == numpy.abs(randomDetectRates)).all()
assert (contactTracingRates!=0).sum() <= self.rates.maxDetects
assert (randomDetectRates!=0).sum() <= self.rates.maxDetects
sigmat = contactRates.sum()
muRSt = numpy.sum(randomDetectRates)
muCTt = numpy.sum(contactTracingRates)
#rhot = sigmat + muRSt + muCTt
#print(randomDetectRates)
assert sigmat >= 0
assert muRSt >= 0
assert muCTt >= 0
sigmaHat = self.rates.upperContactRates(infectedList)
muHat = self.rates.upperDetectionRates(infectedList)
rhoHat = sigmaHat + muHat
#print(muHat)
assert rhoHat >= 0
#Now generate random variable which is the advancement in time
tauPrime = numpy.random.exponential(1/rhoHat)
t = t + tauPrime
assert tauPrime >= 0
#Now compute the probabilities of each event type
contactProb = sigmat/rhoHat
detectionRandom = muRSt/rhoHat
detectionContact = muCTt/rhoHat
#In some rare cases this can be false due to floating point errors
assert sigmat + muRSt + muCTt <= rhoHat + 10**-6, \
"sigmat=%f, muRSt=%f, muCTt=%f, sigmaHat=%f, muHat=%f" % (sigmat, muRSt, muCTt, sigmaHat, muHat)
#Compute random variable
p = numpy.random.rand()
if p < contactProb:
eventInd = Util.randomChoice(contactRates)[0]
infectedIndex = infectedList[eventInd]
contactIndex = contactInds[eventInd]
#Note that each time a sexual contact occurs we weight the edge with the time
self.rates.contactEvent(infectedIndex, contactIndex, t)
numContacts += 1
#Check if the contact results in an infection
q = numpy.random.rand()
if q < self.rates.infectionProbability(infectedIndex, contactIndex, t):
self.graph.vlist.setInfected(contactIndex, t)
infectedSet.add(contactIndex)
susceptibleSet.remove(contactIndex)
elif p >= contactProb and p < contactProb+detectionRandom:
eventInd = Util.randomChoice(randomDetectRates)
newDetectedIndex = infectedList[eventInd]
self.rates.removeEvent(newDetectedIndex, HIVVertices.randomDetect, t)
removedSet.add(newDetectedIndex)
infectedSet.remove(newDetectedIndex)
contactSet.remove(newDetectedIndex)
elif p >= contactProb+detectionRandom and p < contactProb+detectionRandom+detectionContact:
eventInd = Util.randomChoice(contactTracingRates)
newDetectedIndex = infectedList[eventInd]
self.rates.removeEvent(newDetectedIndex, HIVVertices.contactTrace, t)
removedSet.add(newDetectedIndex)
infectedSet.remove(newDetectedIndex)
contactSet.remove(newDetectedIndex)
self.graph.endEventTime = t
assert infectedSet.union(removedSet).union(susceptibleSet) == set(range(self.graph.getNumVertices()))
assert contactSet == infectedSet.union(susceptibleSet)
infectedList = list(infectedSet)
removedList = list(removedSet)
contactList = list(contactSet)
if t >= nextStep:
logging.debug("t-T0=" + str(t-self.T0) + " S=" + str(len(susceptibleSet)) + " I=" + str(len(infectedSet)) + " R=" + str(len(removedSet)) + " C=" + str(numContacts) + " E=" + str(self.graph.getNumEdges()))
infectedIndices.append(infectedList)
removedIndices.append(removedList)
times.append(t)
nextStep += self.recordStep
if self.metrics != None:
self.metrics.addGraph(self.graph)
if self.metrics.shouldBreak():
logging.debug("Breaking as distance has become too large")
break
logging.debug("Finished simulation at time " + str(t) + " for a total time of " + str(t-self.T0))
self.numContacts = numContacts
if verboseOut:
return times, infectedIndices, removedIndices, self.graph
else:
return self.graph
def distance(self):
logging.debug("Distance is " + str(self.metrics.distance()) + ", and final event on graph occured at time " + str(self.graph.endTime() - self.T0))
return self.metrics.distance()
def getNumContacts(self):
return self.numContacts
| [
"[email protected]"
] | |
c030633d3f8acd75c40c4fc8a8369f4a9c37a819 | 334dea3e7941871a6b23be65cfc9b14d6be49db0 | /apps/master/migrations/0011_auto_20210414_2324.py | ff34056bdb39745aa68bcc063db3a7463c01b49c | [] | no_license | HilmiZul/walikelas | e2f3d06dfab3ab48373eda2b1b363fe1e64caef6 | 3febaf97272c78310e488c883a9647b269e25930 | refs/heads/master | 2023-08-15T20:56:58.011519 | 2021-10-07T04:32:43 | 2021-10-07T04:32:43 | 367,083,389 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # Generated by Django 2.2.17 on 2021-04-14 16:24
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('master', '0010_gurumapel_rombel'),
]
operations = [
migrations.RenameField(
model_name='rombel',
old_name='guru',
new_name='walikelas',
),
]
| [
"[email protected]"
] | |
cb4b2fa92a7b554dd223c4162d6d61fa9d634a54 | 4eddf6a34715752dc652571b1ab274f51ceb5da0 | /.history/yjs/test_20210606212007.py | 7d6be688304d2c14aecf1dd4e6af756385e35e82 | [] | no_license | Suelt/Hust-SE-introduction-to-ML | 649aba0e5b41363ceac03330ef02982982a0615d | a66785c3085da573f5748d13608eabf02e616321 | refs/heads/master | 2023-05-27T13:13:41.058545 | 2021-06-10T05:44:02 | 2021-06-10T05:44:02 | 375,582,438 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,503 | py | import numpy as np
from tensorflow.keras.datasets import mnist
import random
class NumpyinMnist():
def __init__(self):
self.layers=2
self.weight=[]
self.weight.append(np.random.randn(30,784))
self.weight.append(np.random.randn(10,30))
self.bias=[]
self.bias.append(np.random.rand(30,1))
self.bias.append(np.random.rand(10,1))
# size[784,30,10]
# w:[output, input]
# b:[output]
def forward(self,x):
for i in range(2):
b=self.bias[i]
w=self.weight[i]
# b_axis1=[]
# for i in range(len(b)):
# b_axis1.append(b[i][0])
z = w@x+b
x = sigmoid(z)
return x
def backpropagation(self, x, y):
x=x.reshape(784,1)
gradient_w = [np.zeros(w.shape) for w in self.weight]
gradient_b = [np.zeros(b.shape) for b in self.bias]
intermediate_list = []
zs = []
intermediate = x
for i in range(2):
b=self.bias[i]
w=self.weight[i]
z = w@intermediate + b
intermediate = sigmoid(z)
zs.append(z)
intermediate_list.append(intermediate)
        # hidden layer -> output layer
loss=np.power((intermediate_list[-1]-y),2).sum()
delta = intermediate_list[-1] * (1 - intermediate_list[-1]) * (intermediate_list[-1] - y)
gradient_b[-1] = delta
intermediate_output=intermediate_list[-2].T
delta_w=delta@intermediate_output
gradient_w[-1] = delta_w
        # hidden layer -> input layer
z = zs[-2]
a = intermediate_list[-2]
delta = np.dot(self.weight[-1].T, delta) * a * (1 - a)
gradient_b[-2] = delta
[email protected]
gradient_w[-2] = delta_w
return gradient_w, gradient_b,loss
def train(self, training_data,test_data, epoches, batch_size, lr):
n = 60000
for j in range(epoches):
#random.shuffle(train_data)
batches = [
training_data[k:k + batch_size]
for k in range(0, n, batch_size)]
for batch in batches:
batch_gradient_w = [np.zeros(w.shape) for w in self.weight]
batch_gradient_b = [np.zeros(b.shape) for b in self.bias]
batch_loss=0
for x, y in batch:
gradient_w, gradient_b,loss = self.backpropagation(x, y)
batch_gradient_w = [batch_w + w for batch_w, w in zip(batch_gradient_w, gradient_w)]
batch_gradient_b = [batch_b + b for batch_b, b in zip(batch_gradient_b, gradient_b)]
batch_loss+=loss
batch_gradient_w = [w / len(batch) for w in batch_gradient_w]
batch_gradient_b = [b / len(batch) for b in batch_gradient_b]
batch_loss=batch_loss/len(batch)
self.weight = [w - lr * batch_w for w,batch_w in zip(self.weight, batch_gradient_w)]
self.bias = [b - lr * batch_b for b, batch_b in zip(self.bias, batch_gradient_b)]
loss=batch_loss
if test_data:
n_test = len(test_data)
print("Epoch {0}:{1}/{2}".format(j, self.evaluate(test_data), n_test),loss)
else:
print("Epoch {0} complete".format(j))
# def update_mini_batch(self, batch, lr):
# batch_gradient_w = [np.zeros(w.shape) for w in self.weight]
# batch_gradient_b = [np.zeros(b.shape) for b in self.bias]
# batch_loss=0
# # for every sample in current batch
# for x, y in batch:
# # list of every w/b gradient
# # [w1,w2,w3]
# gradient_w, gradient_b,loss = self.backpropagation(x, y)
# batch_gradient_w = [batch_w + w for batch_w, w in zip(batch_gradient_w, gradient_w)]
# batch_gradient_b = [batch_b + b for batch_b, b in zip(batch_gradient_b, gradient_b)]
# batch_loss+=loss
# batch_gradient_w = [w / len(batch) for w in batch_gradient_w]
# batch_gradient_b = [b / len(batch) for b in batch_gradient_b]
# batch_loss=batch_loss/len(batch)
# # w = w - lr * nabla_w
# self.weight = [w - lr * batch_w for w,batch_w in zip(self.weight, batch_gradient_w)]
# self.bias = [b - lr * batch_b for b, batch_b in zip(self.bias, batch_gradient_b)]
# return batch_loss
def evaluate(self, test_data):
sum=0
for x,y in test_data:
pred=np.argmax(self.forward(x.reshape([784,1])))
if(pred==y):
sum+=1
return sum
def convert_to_one_hot(y, C):
return np.eye(C)[y.reshape(-1)].T
def sigmoid(X):
return 1.0 / (1 + np.exp(-X))
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = mnist.load_data()
train_data = []
train_x = x_train.reshape([x_train.shape[0], x_train.shape[1]*x_train.shape[2]])
test_data = []
test_x = x_test.reshape([x_test.shape[0], x_test.shape[1]*x_test.shape[2]])
for i in range(train_x.shape[0]):
train_data.append([train_x[i]/255, convert_to_one_hot(y_train[i], 10)])
for i in range(test_x.shape[0]):
test_data.append([test_x[i]/255, y_test[i]])
demo=NumpyinMnist()
demo.train(train_data,test_data,10,100,0.1)
| [
"[email protected]"
] | |
2f84b258aeb88034ffb315eab22e7c3a81441c17 | c09a8ed8cc41f1c60341aaa4a6c267950022d336 | /database-test/code/app.py | 2bfc3c647d519e86dfbc6a90541e1e17c4f6e196 | [] | no_license | chuiizeet/Flask-bois | 796abc1c135dd2d0032179a818a9227ee919d86e | a7c06e5167e169bc57a83bb4e17bef85e620c2fb | refs/heads/master | 2020-05-30T02:06:43.128860 | 2019-06-25T22:08:12 | 2019-06-25T22:08:12 | 189,491,906 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from secure import authenticate, identity
from user import UserRegister
from item import Item, ItemList
app = Flask(__name__)
app.secret_key = 'chuy'
api = Api(app)
jwt = JWT(app, authenticate, identity)
api.add_resource(Item, '/item/<string:name>')
api.add_resource(ItemList, '/items')
api.add_resource(UserRegister, '/register')
if __name__ == "__main__":
app.run(port=5000, debug=True) | [
"[email protected]"
] | |
74623723dc473959fda9ef462c8209ec5452c655 | 2e44570b5176e8facc05f8b7e58b167b625fdd03 | /wms_project/settings.py | fe971b4e6386c30f9a0b82d056a8fb4d65c3cd8b | [] | no_license | jccode/wms_project | 153e9b6e20c92c03881d88f44b9ab0c5e13851b3 | c994de8ecda39d96e018956d609f12281a55ee85 | refs/heads/master | 2016-09-06T16:03:14.496501 | 2014-03-30T15:49:42 | 2014-03-30T15:49:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,530 | py | # Django settings for wms_project project.
import os
gettext = lambda s: s
PROJECT_PATH = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_PATH, 'database.sqlite'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'zh-cn'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '_$6i1+8_z=m3q)uy6a8#q9&k=%$eh!c+m%qcjz8qz#y+14y++l'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'wms_project.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'wms_project.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
'mptt',
'wms',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
DATETIME_FORMAT = 'Y-m-d H:i'
| [
"[email protected]"
] |