Dataset schema (one row per source file):

| column | dtype | min | max |
|---|---|---|---|
| blob_id | string | 40 chars | 40 chars |
| directory_id | string | 40 chars | 40 chars |
| path | string | 3 chars | 616 chars |
| content_id | string | 40 chars | 40 chars |
| detected_licenses | list | 0 items | 112 items |
| license_type | string (2 classes) | - | - |
| repo_name | string | 5 chars | 115 chars |
| snapshot_id | string | 40 chars | 40 chars |
| revision_id | string | 40 chars | 40 chars |
| branch_name | string (777 classes) | - | - |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (22 classes) | - | - |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string (149 classes) | - | - |
| src_encoding | string (26 classes) | - | - |
| language | string (1 class) | - | - |
| is_vendor | bool (2 classes) | - | - |
| is_generated | bool (2 classes) | - | - |
| length_bytes | int64 | 3 | 10.2M |
| extension | string (188 classes) | - | - |
| content | string | 3 chars | 10.2M chars |
| authors | list | 1 item | 1 item |
| author_id | string | 1 char | 132 chars |
File: /solutions_python/Problem_118/2550.py (repo: dr-dos-ok/Code_Jam_Webscraper, no license, Python, 512 bytes)

#!/usr/bin/env python3
# Count the integers n in [i, j] that are perfect squares and whose value n
# and square root are both decimal palindromes.
import sys
from math import isqrt


def pal(x):
    """Return True if the decimal representation of x is a palindrome."""
    x = str(x)
    return x == x[::-1]


if __name__ == "__main__":
    t = int(sys.stdin.readline())
    for case in range(1, t + 1):
        count = 0
        i, j = [int(c) for c in sys.stdin.readline().split()]
        for n in range(i, j + 1):
            # isqrt gives an exact integer root, avoiding the float-precision
            # pitfalls of math.sqrt on large n.
            r = isqrt(n)
            if r * r != n:
                continue
            if pal(n) and pal(r):
                count += 1
        print("Case #%d: %d" % (case, count))
File: /configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/mobilenetv2_coco_384x288.py (repo: bit-scientist/mmpose, Apache-2.0, Python, 4,196 bytes)

_base_ = ['../../../../_base_/datasets/coco.py']
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=10, metric='mAP', save_best='AP')

optimizer = dict(
    type='Adam',
    lr=5e-4,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[170, 200])
total_epochs = 210
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])

channel_cfg = dict(
    num_output_channels=17,
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])

# model settings
model = dict(
    type='TopDown',
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(type='MobileNetV2', widen_factor=1., out_indices=(7, )),
    keypoint_head=dict(
        type='TopdownHeatmapSimpleHead',
        in_channels=1280,
        out_channels=channel_cfg['num_output_channels'],
        loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
    train_cfg=dict(),
    test_cfg=dict(
        flip_test=True,
        post_process='default',
        shift_heatmap=True,
        modulate_kernel=11))

data_cfg = dict(
    image_size=[288, 384],
    heatmap_size=[72, 96],
    num_output_channels=channel_cfg['num_output_channels'],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    soft_nms=False,
    nms_thr=1.0,
    oks_thr=0.9,
    vis_thr=0.2,
    use_gt_bbox=False,
    det_bbox_thr=0.0,
    bbox_file='data/coco/person_detection_results/'
    'COCO_val2017_detections_AP_H_56_person.json',
)
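
# In this top-down heatmap setup the head predicts at 1/4 of the input
# resolution; the numbers above encode exactly that relationship:
assert [s // 4 for s in data_cfg['image_size']] == data_cfg['heatmap_size']  # [288, 384] -> [72, 96]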

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownRandomFlip', flip_prob=0.5),
    dict(
        type='TopDownHalfBodyTransform',
        num_joints_half_body=8,
        prob_half_body=0.3),
    dict(
        type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(type='TopDownGenerateTarget', sigma=3),
    dict(
        type='Collect',
        keys=['img', 'target', 'target_weight'],
        meta_keys=[
            'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
            'rotation', 'bbox_score', 'flip_pairs'
        ]),
]

val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='TopDownAffine'),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'center', 'scale', 'rotation', 'bbox_score',
            'flip_pairs'
        ]),
]

test_pipeline = val_pipeline

data_root = 'data/coco'
data = dict(
    samples_per_gpu=64,
    workers_per_gpu=2,
    val_dataloader=dict(samples_per_gpu=32),
    test_dataloader=dict(samples_per_gpu=32),
    train=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline,
        dataset_info={{_base_.dataset_info}}),
    val=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
    test=dict(
        type='TopDownCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline,
        dataset_info={{_base_.dataset_info}}),
)
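
# A minimal sketch of inspecting this config programmatically; it assumes an
# mmcv 1.x installation and that the file sits at its usual path inside an
# mmpose checkout:
#
#     from mmcv import Config
#     cfg = Config.fromfile(
#         'configs/body/2d_kpt_sview_rgb_img/topdown_heatmap/coco/'
#         'mobilenetv2_coco_384x288.py')
#     print(cfg.model.backbone.type, cfg.total_epochs)  # MobileNetV2 210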
File: /data/input/aldryn/django-simple-sso/simple_sso/sso_server/server.py (repo: bopopescu/pythonanalyzer, no license, Python, 6,308 bytes)

# -*- coding: utf-8 -*-
from urllib.parse import urlparse, urlunparse, urlencode

from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.admin.options import ModelAdmin
from django.core.urlresolvers import reverse
from django.http import (HttpResponseForbidden, HttpResponseBadRequest,
                         HttpResponseRedirect, QueryDict)
from django.utils import timezone
from django.views.generic.base import View
from itsdangerous import URLSafeTimedSerializer
from simple_sso.sso_server.models import Token, Consumer
import datetime
from webservices.models import Provider
from webservices.sync import provider_for_django


class BaseProvider(Provider):
    max_age = 5

    def __init__(self, server):
        self.server = server

    def get_private_key(self, public_key):
        try:
            self.consumer = Consumer.objects.get(public_key=public_key)
        except Consumer.DoesNotExist:
            return None
        return self.consumer.private_key


class RequestTokenProvider(BaseProvider):
    def provide(self, data):
        redirect_to = data['redirect_to']
        token = Token.objects.create(consumer=self.consumer, redirect_to=redirect_to)
        return {'request_token': token.request_token}


class AuthorizeView(View):
    """
    The client gets redirected to this view with the `request_token` it
    obtained beforehand through a request-token request.

    This view checks that the user is logged in on the server application and
    that this user has the necessary rights. If the user is not logged in,
    the user is prompted to log in.
    """
    server = None

    def get(self, request):
        request_token = request.GET.get('token', None)
        if not request_token:
            return self.missing_token_argument()
        try:
            self.token = Token.objects.select_related('consumer').get(request_token=request_token)
        except Token.DoesNotExist:
            return self.token_not_found()
        if not self.check_token_timeout():
            return self.token_timeout()
        self.token.refresh()
        if request.user.is_authenticated():
            return self.handle_authenticated_user()
        else:
            return self.handle_unauthenticated_user()

    def missing_token_argument(self):
        return HttpResponseBadRequest('Token missing')

    def token_not_found(self):
        return HttpResponseForbidden('Token not found')

    def token_timeout(self):
        return HttpResponseForbidden('Token timed out')

    def check_token_timeout(self):
        delta = timezone.now() - self.token.timestamp
        if delta > self.server.token_timeout:
            self.token.delete()
            return False
        else:
            return True

    def handle_authenticated_user(self):
        if self.server.has_access(self.request.user, self.token.consumer):
            return self.success()
        else:
            return self.access_denied()

    def handle_unauthenticated_user(self):
        next_url = '%s?%s' % (self.request.path, urlencode([('token', self.token.request_token)]))
        url = '%s?%s' % (reverse(self.server.auth_view_name), urlencode([('next', next_url)]))
        return HttpResponseRedirect(url)

    def access_denied(self):
        return HttpResponseForbidden("Access denied")

    def success(self):
        self.token.user = self.request.user
        self.token.save()
        serializer = URLSafeTimedSerializer(self.token.consumer.private_key)
        parse_result = urlparse(self.token.redirect_to)
        query_dict = QueryDict(parse_result.query, mutable=True)
        query_dict['access_token'] = serializer.dumps(self.token.access_token)
        url = urlunparse((parse_result.scheme, parse_result.netloc, parse_result.path, '', query_dict.urlencode(), ''))
        return HttpResponseRedirect(url)
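
# On the consumer side, the signed access token appended in success() above
# is decoded with the same shared private key. A minimal sketch, assuming
# `private_key` holds the consumer's key and `signed_token` the received
# `access_token` query parameter:
#
#     from itsdangerous import URLSafeTimedSerializer
#     serializer = URLSafeTimedSerializer(private_key)
#     access_token = serializer.loads(signed_token, max_age=300)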

class VerificationProvider(BaseProvider, AuthorizeView):
    def provide(self, data):
        token = data['access_token']
        try:
            self.token = Token.objects.select_related('user').get(access_token=token, consumer=self.consumer)
        except Token.DoesNotExist:
            return self.token_not_found()
        if not self.check_token_timeout():
            return self.token_timeout()
        if not self.token.user:
            return self.token_not_bound()
        extra_data = data.get('extra_data', None)
        return self.server.get_user_data(
            self.token.user, self.consumer, extra_data=extra_data)

    def token_not_bound(self):
        return HttpResponseForbidden("Invalid token")


class ConsumerAdmin(ModelAdmin):
    readonly_fields = ['public_key', 'private_key']


class Server(object):
    request_token_provider = RequestTokenProvider
    authorize_view = AuthorizeView
    verification_provider = VerificationProvider
    token_timeout = datetime.timedelta(minutes=5)
    client_admin = ConsumerAdmin
    auth_view_name = 'django.contrib.auth.views.login'

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
        self.register_admin()

    def register_admin(self):
        admin.site.register(Consumer, self.client_admin)

    def has_access(self, user, consumer):
        return True

    def get_user_extra_data(self, user, consumer, extra_data):
        raise NotImplementedError()

    def get_user_data(self, user, consumer, extra_data=None):
        user_data = {
            'username': user.username,
            'email': user.email,
            'first_name': user.first_name,
            'last_name': user.last_name,
            'is_staff': False,
            'is_superuser': False,
            'is_active': user.is_active,
        }
        if extra_data:
            user_data['extra_data'] = self.get_user_extra_data(
                user, consumer, extra_data)
        return user_data

    def get_urls(self):
        return patterns('',
            url(r'^request-token/$', provider_for_django(self.request_token_provider(server=self)), name='simple-sso-request-token'),
            url(r'^authorize/$', self.authorize_view.as_view(server=self), name='simple-sso-authorize'),
            url(r'^verify/$', provider_for_django(self.verification_provider(server=self)), name='simple-sso-verify'),
        )
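
# A sketch of mounting these endpoints in a project's urls.py; the Server
# instance name and the URL prefix are placeholders, and the urlconf style
# assumes the same Django generation this module targets:
#
#     from django.conf.urls import include, url
#     from simple_sso.sso_server.server import Server
#
#     sso_server = Server()
#     urlpatterns = [
#         url(r'^server/', include(sso_server.get_urls())),
#     ]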
File: /venv/Lib/site-packages/sklearn/feature_selection/tests/test_feature_select.py (repo: python019/matplotlib_simple, no license, Python, 26,540 bytes)

"""
Todo: cross-check the F-values with statsmodels
"""
import itertools
import warnings

import numpy as np
from scipy import stats, sparse

import pytest

from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_warns
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_warns_message
from sklearn.utils import safe_mask

from sklearn.datasets import make_classification, make_regression
from sklearn.feature_selection import (
    chi2, f_classif, f_oneway, f_regression, mutual_info_classif,
    mutual_info_regression, SelectPercentile, SelectKBest, SelectFpr,
    SelectFdr, SelectFwe, GenericUnivariateSelect)


##############################################################################
# Test the score functions

def test_f_oneway_vs_scipy_stats():
    # Test that our f_oneway gives the same result as scipy.stats
    rng = np.random.RandomState(0)
    X1 = rng.randn(10, 3)
    X2 = 1 + rng.randn(10, 3)
    f, pv = stats.f_oneway(X1, X2)
    f2, pv2 = f_oneway(X1, X2)
    assert np.allclose(f, f2)
    assert np.allclose(pv, pv2)


def test_f_oneway_ints():
    # Smoke test f_oneway on integers: that it does not raise casting errors
    # with recent numpys
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    fint, pint = f_oneway(X, y)

    # test that it gives the same result as with float
    f, p = f_oneway(X.astype(float), y)
    assert_array_almost_equal(f, fint, decimal=4)
    assert_array_almost_equal(p, pint, decimal=4)


def test_f_classif():
    # Test whether the F test yields meaningful results
    # on a simple simulated classification problem
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    F, pv = f_classif(X, y)
    F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
    assert (F > 0).all()
    assert (pv > 0).all()
    assert (pv < 1).all()
    assert (pv[:5] < 0.05).all()
    assert (pv[5:] > 1.e-4).all()
    assert_array_almost_equal(F_sparse, F)
    assert_array_almost_equal(pv_sparse, pv)


def test_f_regression():
    # Test whether the F test yields meaningful results
    # on a simple simulated regression problem
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0)

    F, pv = f_regression(X, y)
    assert (F > 0).all()
    assert (pv > 0).all()
    assert (pv < 1).all()
    assert (pv[:5] < 0.05).all()
    assert (pv[5:] > 1.e-4).all()

    # with centering, compare with sparse
    F, pv = f_regression(X, y, center=True)
    F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=True)
    assert_array_almost_equal(F_sparse, F)
    assert_array_almost_equal(pv_sparse, pv)

    # again without centering, compare with sparse
    F, pv = f_regression(X, y, center=False)
    F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
    assert_array_almost_equal(F_sparse, F)
    assert_array_almost_equal(pv_sparse, pv)


def test_f_regression_input_dtype():
    # Test whether f_regression returns the same value
    # for any numeric data_type
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    y = np.arange(10).astype(int)

    F1, pv1 = f_regression(X, y)
    F2, pv2 = f_regression(X, y.astype(float))
    assert_array_almost_equal(F1, F2, 5)
    assert_array_almost_equal(pv1, pv2, 5)


def test_f_regression_center():
    # Test whether f_regression preserves dof according to 'center' argument
    # We use two centered variates so we have a simple relationship between
    # F-score with variates centering and F-score without variates centering.
    # Create toy example
    X = np.arange(-5, 6).reshape(-1, 1)  # X has zero mean
    n_samples = X.size
    Y = np.ones(n_samples)
    Y[::2] *= -1.
    Y[0] = 0.  # have Y mean being null

    F1, _ = f_regression(X, Y, center=True)
    F2, _ = f_regression(X, Y, center=False)
    assert_array_almost_equal(F1 * (n_samples - 1.) / (n_samples - 2.), F2)
    assert_almost_equal(F2[0], 0.232558139)  # value from statsmodels OLS
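
# The factor (n_samples - 1.) / (n_samples - 2.) above is the ratio of
# residual degrees of freedom: centering estimates one extra parameter (the
# mean), leaving n - 2 dof instead of n - 1. For the 11-sample toy data:
#
#     n = 11
#     assert (n - 1) / (n - 2) == 10 / 9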

def test_f_classif_multi_class():
    # Test whether the F test yields meaningful results
    # on a simple simulated classification problem
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    F, pv = f_classif(X, y)
    assert (F > 0).all()
    assert (pv > 0).all()
    assert (pv < 1).all()
    assert (pv[:5] < 0.05).all()
    assert (pv[5:] > 1.e-4).all()


def test_select_percentile_classif():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the percentile heuristic
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    univariate_filter = SelectPercentile(f_classif, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                   param=25).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)


def test_select_percentile_classif_sparse():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the percentile heuristic
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    X = sparse.csr_matrix(X)
    univariate_filter = SelectPercentile(f_classif, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(f_classif, mode='percentile',
                                   param=25).fit(X, y).transform(X)
    assert_array_equal(X_r.toarray(), X_r2.toarray())
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)

    X_r2inv = univariate_filter.inverse_transform(X_r2)
    assert sparse.issparse(X_r2inv)
    support_mask = safe_mask(X_r2inv, support)
    assert X_r2inv.shape == X.shape
    assert_array_equal(X_r2inv[:, support_mask].toarray(), X_r.toarray())
    # Check other columns are empty
    assert X_r2inv.getnnz() == X_r.getnnz()


##############################################################################
# Test univariate selection in classification settings

def test_select_kbest_classif():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the k best heuristic
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    univariate_filter = SelectKBest(f_classif, k=5)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_classif, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)


def test_select_kbest_all():
    # Test whether k="all" correctly returns all features.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)

    univariate_filter = SelectKBest(f_classif, k='all')
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_array_equal(X, X_r)


def test_select_kbest_zero():
    # Test whether k=0 correctly returns no features.
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)

    univariate_filter = SelectKBest(f_classif, k=0)
    univariate_filter.fit(X, y)
    support = univariate_filter.get_support()
    gtruth = np.zeros(10, dtype=bool)
    assert_array_equal(support, gtruth)
    X_selected = assert_warns_message(UserWarning, 'No features were selected',
                                      univariate_filter.transform, X)
    assert X_selected.shape == (20, 0)


def test_select_heuristics_classif():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple classification problem
    # with the fdr, fwe and fpr heuristics
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    univariate_filter = SelectFwe(f_classif, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(
            f_classif, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        assert_array_almost_equal(support, gtruth)


##############################################################################
# Test univariate selection in regression settings

def assert_best_scores_kept(score_filter):
    scores = score_filter.scores_
    support = score_filter.get_support()
    assert_array_almost_equal(np.sort(scores[support]),
                              np.sort(scores)[-support.sum():])


def test_select_percentile_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the percentile heuristic
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    univariate_filter = SelectPercentile(f_regression, percentile=25)
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_best_scores_kept(univariate_filter)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='percentile', param=25).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)
    X_2 = X.copy()
    X_2[:, np.logical_not(support)] = 0
    assert_array_equal(X_2, univariate_filter.inverse_transform(X_r))
    # Check inverse_transform respects dtype
    assert_array_equal(X_2.astype(bool),
                       univariate_filter.inverse_transform(X_r.astype(bool)))


def test_select_percentile_regression_full():
    # Test whether the relative univariate feature selection
    # selects all features when '100%' is asked.
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    univariate_filter = SelectPercentile(f_regression, percentile=100)
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_best_scores_kept(univariate_filter)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='percentile', param=100).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.ones(20)
    assert_array_equal(support, gtruth)


def test_invalid_percentile():
    X, y = make_regression(n_samples=10, n_features=20,
                           n_informative=2, shuffle=False, random_state=0)

    with pytest.raises(ValueError):
        SelectPercentile(percentile=-1).fit(X, y)
    with pytest.raises(ValueError):
        SelectPercentile(percentile=101).fit(X, y)
    with pytest.raises(ValueError):
        GenericUnivariateSelect(mode='percentile', param=-1).fit(X, y)
    with pytest.raises(ValueError):
        GenericUnivariateSelect(mode='percentile', param=101).fit(X, y)


def test_select_kbest_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the k best heuristic
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)

    univariate_filter = SelectKBest(f_regression, k=5)
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_best_scores_kept(univariate_filter)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)


def test_select_heuristics_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the fpr, fdr or fwe heuristics
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0, noise=10)

    univariate_filter = SelectFpr(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    for mode in ['fdr', 'fpr', 'fwe']:
        X_r2 = GenericUnivariateSelect(
            f_regression, mode=mode, param=0.01).fit(X, y).transform(X)
        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
        assert np.sum(support[5:] == 1) < 3


def test_boundary_case_ch2():
    # Test boundary case, and always aim to select 1 feature.
    X = np.array([[10, 20], [20, 20], [20, 30]])
    y = np.array([[1], [0], [0]])
    scores, pvalues = chi2(X, y)
    assert_array_almost_equal(scores, np.array([4., 0.71428571]))
    assert_array_almost_equal(pvalues, np.array([0.04550026, 0.39802472]))

    filter_fdr = SelectFdr(chi2, alpha=0.1)
    filter_fdr.fit(X, y)
    support_fdr = filter_fdr.get_support()
    assert_array_equal(support_fdr, np.array([True, False]))

    filter_kbest = SelectKBest(chi2, k=1)
    filter_kbest.fit(X, y)
    support_kbest = filter_kbest.get_support()
    assert_array_equal(support_kbest, np.array([True, False]))

    filter_percentile = SelectPercentile(chi2, percentile=50)
    filter_percentile.fit(X, y)
    support_percentile = filter_percentile.get_support()
    assert_array_equal(support_percentile, np.array([True, False]))

    filter_fpr = SelectFpr(chi2, alpha=0.1)
    filter_fpr.fit(X, y)
    support_fpr = filter_fpr.get_support()
    assert_array_equal(support_fpr, np.array([True, False]))

    filter_fwe = SelectFwe(chi2, alpha=0.1)
    filter_fwe.fit(X, y)
    support_fwe = filter_fwe.get_support()
    assert_array_equal(support_fwe, np.array([True, False]))


@pytest.mark.parametrize("alpha", [0.001, 0.01, 0.1])
@pytest.mark.parametrize("n_informative", [1, 5, 10])
def test_select_fdr_regression(alpha, n_informative):
    # Test that fdr heuristic actually has low FDR.
    def single_fdr(alpha, n_informative, random_state):
        X, y = make_regression(n_samples=150, n_features=20,
                               n_informative=n_informative, shuffle=False,
                               random_state=random_state, noise=10)

        with warnings.catch_warnings(record=True):
            # Warnings can be raised when no features are selected
            # (low alpha or very noisy data)
            univariate_filter = SelectFdr(f_regression, alpha=alpha)
            X_r = univariate_filter.fit(X, y).transform(X)
            X_r2 = GenericUnivariateSelect(
                f_regression, mode='fdr', param=alpha).fit(X, y).transform(X)

        assert_array_equal(X_r, X_r2)
        support = univariate_filter.get_support()
        num_false_positives = np.sum(support[n_informative:] == 1)
        num_true_positives = np.sum(support[:n_informative] == 1)

        if num_false_positives == 0:
            return 0.
        false_discovery_rate = (num_false_positives /
                                (num_true_positives + num_false_positives))
        return false_discovery_rate

    # As per Benjamini-Hochberg, the expected false discovery rate
    # should be lower than alpha:
    # FDR = E(FP / (TP + FP)) <= alpha
    false_discovery_rate = np.mean([single_fdr(alpha, n_informative,
                                               random_state) for
                                    random_state in range(100)])
    assert alpha >= false_discovery_rate

    # Make sure that the empirical false discovery rate increases
    # with alpha:
    if false_discovery_rate != 0:
        assert false_discovery_rate > alpha / 10
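
# SelectFdr implements the Benjamini-Hochberg step-up rule that the
# expectation bound above refers to. A minimal sketch of that rule on a
# vector of p-values (assuming independent tests): keep everything up to the
# largest p_(k) with p_(k) <= alpha * k / m.
def bh_threshold(pvals, alpha):
    sv = np.sort(pvals)
    m = len(sv)
    selected = sv[sv <= alpha * np.arange(1, m + 1) / m]
    return selected.max() if selected.size > 0 else 0.0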

def test_select_fwe_regression():
    # Test whether the relative univariate feature selection
    # gets the correct items in a simple regression problem
    # with the fwe heuristic
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    univariate_filter = SelectFwe(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
    assert np.sum(support[5:] == 1) < 2


def test_selectkbest_tiebreaking():
    # Test whether SelectKBest actually selects k features in case of ties.
    # Prior to 0.11, SelectKBest would return more features than requested.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]
    dummy_score = lambda X, y: (X[0], X[0])
    for X in Xs:
        sel = SelectKBest(dummy_score, k=1)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert X1.shape[1] == 1
        assert_best_scores_kept(sel)

        sel = SelectKBest(dummy_score, k=2)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert X2.shape[1] == 2
        assert_best_scores_kept(sel)


def test_selectpercentile_tiebreaking():
    # Test if SelectPercentile selects the right n_features in case of ties.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]
    dummy_score = lambda X, y: (X[0], X[0])
    for X in Xs:
        sel = SelectPercentile(dummy_score, percentile=34)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert X1.shape[1] == 1
        assert_best_scores_kept(sel)

        sel = SelectPercentile(dummy_score, percentile=67)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert X2.shape[1] == 2
        assert_best_scores_kept(sel)


def test_tied_pvalues():
    # Test whether k-best and percentiles work with tied pvalues from chi2.
    # chi2 will return the same p-values for the following features, but it
    # will return different scores.
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]

    for perm in itertools.permutations((0, 1, 2)):
        X = X0[:, perm]
        Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
        assert Xt.shape == (2, 2)
        assert 9998 not in Xt

        Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
        assert Xt.shape == (2, 2)
        assert 9998 not in Xt


def test_scorefunc_multilabel():
    # Test whether k-best and percentiles work with multilabels with chi2.
    X = np.array([[10000, 9999, 0], [100, 9999, 0], [1000, 99, 0]])
    y = [[1, 1], [0, 1], [1, 0]]

    Xt = SelectKBest(chi2, k=2).fit_transform(X, y)
    assert Xt.shape == (3, 2)
    assert 0 not in Xt

    Xt = SelectPercentile(chi2, percentile=67).fit_transform(X, y)
    assert Xt.shape == (3, 2)
    assert 0 not in Xt


def test_tied_scores():
    # Test for stable sorting in k-best with tied scores.
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]

    for n_features in [1, 2, 3]:
        sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        X_test = sel.transform([[0, 1, 2]])
        assert_array_equal(X_test[0], np.arange(3)[-n_features:])


def test_nans():
    # Assert that SelectKBest and SelectPercentile can handle NaNs.
    # First feature has zero variance to confuse f_classif (ANOVA) and
    # make it return a NaN.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]

    for select in (SelectKBest(f_classif, k=2),
                   SelectPercentile(f_classif, percentile=67)):
        ignore_warnings(select.fit)(X, y)
        assert_array_equal(select.get_support(indices=True), np.array([1, 2]))


def test_score_func_error():
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]

    for SelectFeatures in [SelectKBest, SelectPercentile, SelectFwe,
                           SelectFdr, SelectFpr, GenericUnivariateSelect]:
        with pytest.raises(TypeError):
            SelectFeatures(score_func=10).fit(X, y)


def test_invalid_k():
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]

    with pytest.raises(ValueError):
        SelectKBest(k=-1).fit(X, y)
    with pytest.raises(ValueError):
        SelectKBest(k=4).fit(X, y)
    with pytest.raises(ValueError):
        GenericUnivariateSelect(mode='k_best', param=-1).fit(X, y)
    with pytest.raises(ValueError):
        GenericUnivariateSelect(mode='k_best', param=4).fit(X, y)


def test_f_classif_constant_feature():
    # Test that f_classif warns if a feature is constant throughout.
    X, y = make_classification(n_samples=10, n_features=5)
    X[:, 0] = 2.0
    assert_warns(UserWarning, f_classif, X, y)


def test_no_feature_selected():
    rng = np.random.RandomState(0)

    # Generate random uncorrelated data: a strict univariate test should
    # reject all the features
    X = rng.rand(40, 10)
    y = rng.randint(0, 4, size=40)
    strict_selectors = [
        SelectFwe(alpha=0.01).fit(X, y),
        SelectFdr(alpha=0.01).fit(X, y),
        SelectFpr(alpha=0.01).fit(X, y),
        SelectPercentile(percentile=0).fit(X, y),
        SelectKBest(k=0).fit(X, y),
    ]
    for selector in strict_selectors:
        assert_array_equal(selector.get_support(), np.zeros(10))
        X_selected = assert_warns_message(
            UserWarning, 'No features were selected', selector.transform, X)
        assert X_selected.shape == (40, 0)


def test_mutual_info_classif():
    X, y = make_classification(n_samples=100, n_features=5,
                               n_informative=1, n_redundant=1,
                               n_repeated=0, n_classes=2,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)

    # Test in KBest mode.
    univariate_filter = SelectKBest(mutual_info_classif, k=2)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        mutual_info_classif, mode='k_best', param=2).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(5)
    gtruth[:2] = 1
    assert_array_equal(support, gtruth)

    # Test in Percentile mode.
    univariate_filter = SelectPercentile(mutual_info_classif, percentile=40)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        mutual_info_classif, mode='percentile', param=40).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(5)
    gtruth[:2] = 1
    assert_array_equal(support, gtruth)


def test_mutual_info_regression():
    X, y = make_regression(n_samples=100, n_features=10, n_informative=2,
                           shuffle=False, random_state=0, noise=10)

    # Test in KBest mode.
    univariate_filter = SelectKBest(mutual_info_regression, k=2)
    X_r = univariate_filter.fit(X, y).transform(X)
    assert_best_scores_kept(univariate_filter)
    X_r2 = GenericUnivariateSelect(
        mutual_info_regression, mode='k_best', param=2).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(10)
    gtruth[:2] = 1
    assert_array_equal(support, gtruth)

    # Test in Percentile mode.
    univariate_filter = SelectPercentile(mutual_info_regression, percentile=20)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(mutual_info_regression, mode='percentile',
                                   param=20).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(10)
    gtruth[:2] = 1
    assert_array_equal(support, gtruth)
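
# Outside of tests, these selectors are typically wrapped in a Pipeline so
# that the selection is fit on training data only. A minimal sketch, assuming
# X_train and y_train exist:
#
#     from sklearn.pipeline import make_pipeline
#     from sklearn.linear_model import LogisticRegression
#
#     clf = make_pipeline(SelectKBest(f_classif, k=10), LogisticRegression())
#     clf.fit(X_train, y_train)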
File: /projects/mid_atlantic/study/plot_FigS3_Distance_v_Depth_By_State.py (repo: EnergyModels/caes, MIT, Python, 4,026 bytes)

import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset

df = pd.read_csv('all_analysis.csv')

# f, a = plt.subplots(2,1)
# a = a.ravel()
#
# sns.scatterplot(data=df, x='NEAR_DIST',y='feasible_fr', hue='NEAR_FC', ax=a[0])
#
# sns.scatterplot(data=df, x='NEAR_DIST',y='RASTERVALU', hue='NEAR_FC', ax=a[1])

# conversions and column renaming
df.loc[:, 'Distance to shore (km)'] = df.loc[:, 'NEAR_DIST'] / 1000.0
df.loc[:, 'Water depth (m)'] = df.loc[:, 'RASTERVALU']
df.loc[:, 'Feasibility (%)'] = df.loc[:, 'feasible_fr'] * 100.0
df.loc[:, 'Formation (-)'] = df.loc[:, 'formation']
df.loc[:, 'Nearest State (-)'] = df.loc[:, 'NEAR_FC']

loc_dict = {'VA_shore': 'Virginia', 'MD_shore': 'Maryland', 'NJ_shore': 'New Jersey', 'DE_shore': 'Delaware',
            'NY_shore': 'New York', 'MA_shore': 'Massachusetts', 'RI_shore': 'Rhode Island'}
formation_dict = {'LK1': 'Lower Cretaceous', 'MK1-3': 'Middle Cretaceous', 'UJ1': 'Upper Jurassic'}

# rename states
for loc in df.loc[:, 'Nearest State (-)'].unique():
    ind = df.loc[:, 'Nearest State (-)'] == loc
    df.loc[ind, 'Nearest State (-)'] = loc_dict[loc]

# rename formations
for formation in df.loc[:, 'Formation (-)'].unique():
    ind = df.loc[:, 'Formation (-)'] == formation
    df.loc[ind, 'Formation (-)'] = formation_dict[formation]

# Filter data with feasibility greater than 0.8
# df = df[df.loc[:,'Feasibility (%)']>=0.8]

# Filter data with mean RTE greater than 0.5
df = df[df.loc[:, 'RTE_mean'] >= 0.5]

# sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
#                 size='Feasibility (%)', style='Formation (-)')
#
# sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
#                 size='Feasibility (%)', style='Formation (-)', ax=a[1])
#
# a[1].set_xlim(left=0.0, right=100.0)
# a[1].set_ylim(top=0.0, bottom=-100.0)

# create figure
f, a = plt.subplots(1, 1)
axins = zoomed_inset_axes(a, zoom=2.2, loc='upper center', bbox_to_anchor=(0.5, -0.2), bbox_transform=a.transAxes)

# Main plot
sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
                style='Formation (-)', ax=a)
a.set_xlim(left=0.0, right=300.0)
a.set_ylim(top=0, bottom=-400.0)
# a.set_yscale('symlog')

# Inset, with a dashed rectangle on the main axes marking the zoomed region
x_lims = [0.0, 100.0]
y_lims = [0, -60.0]
rect = plt.Rectangle((x_lims[0] + 1, y_lims[0]), x_lims[1] - x_lims[0] + 1, y_lims[1] - y_lims[0], fill=False,
                     facecolor="black",
                     edgecolor='black', linestyle='--')
a.add_patch(rect)

sns.scatterplot(data=df, x='Distance to shore (km)', y='Water depth (m)', hue='Nearest State (-)',
                style='Formation (-)', legend=False, ax=axins)
axins.set_xlim(left=x_lims[0], right=x_lims[1])
axins.set_ylim(top=y_lims[0], bottom=y_lims[1])
# axins.set_yscale('symlog')
axins.yaxis.set_major_locator(plt.MaxNLocator(3))

a.legend(bbox_to_anchor=(1.025, 0.0), loc="center left", ncol=1)

a.text(-0.1, 1.0, 'a', horizontalalignment='center', verticalalignment='center',
       transform=a.transAxes, fontsize='medium', fontweight='bold')
axins.text(-0.3, 1.0, 'b', horizontalalignment='center', verticalalignment='center',
           transform=axins.transAxes, fontsize='medium', fontweight='bold')

# Column width guidelines https://www.elsevier.com/authors/author-schemas/artwork-and-media-instructions/artwork-sizing
# Single column: 90 mm = 3.54 in
# 1.5 column: 140 mm = 5.51 in
# 2 column: 190 mm = 7.48 in
width = 7.48  # inches
height = 7.0  # inches

# Set size
f.set_size_inches(width, height)
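
# The figure widths above are the journal column widths converted at
# 25.4 mm per inch; a one-line helper reproduces all three values:
def mm_to_in(mm):
    return mm / 25.4

assert (round(mm_to_in(90), 2), round(mm_to_in(140), 2), round(mm_to_in(190), 2)) == (3.54, 5.51, 7.48)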

plt.subplots_adjust(top=0.95,
                    bottom=0.5,
                    left=0.12,
                    right=0.7,
                    hspace=0.2,
                    wspace=0.2)

# save
plt.savefig('FigS3_Distance_v_Depth_By_State.png', dpi=300)
File: /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.06_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=65/params.py (repo: ricardobtxr/experiment-scripts, no license, Python, 248 bytes)

{'cpus': 4,
 'duration': 30,
 'final_util': '3.662643',
 'max_util': '3.5',
 'periods': 'harmonic-2',
 'release_master': False,
 'res_distr': '1',
 'res_nmb': '4',
 'res_weight': '0.06',
 'scheduler': 'RUN',
 'trial': 65,
 'utils': 'uni-medium-3'}
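
# This file holds a single dict literal, so it can be read back without
# importing it as a module. A minimal sketch, assuming the path above:
#
#     import ast
#     with open('params.py') as fh:
#         params = ast.literal_eval(fh.read())
#     assert params['cpus'] == 4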
File: /src/326A/test_cdf_326A.py (repo: kopok2/CodeforcesSolutionsPython, MIT, Python, 185 bytes)

import unittest
from unittest.mock import patch

from cdf_326A import CodeforcesTask326ASolution


class TestCDF326A(unittest.TestCase):
    pass  # no tests defined; `pass` keeps the class body valid


if __name__ == "__main__":
    unittest.main()
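
# A hypothetical smoke test in the style the imports suggest; the constructor
# and any method names on CodeforcesTask326ASolution are assumptions here,
# not the class's documented API:
#
#     class TestCDF326ASmoke(unittest.TestCase):
#         def test_can_construct(self):
#             with patch("sys.stdin"):
#                 self.assertIsNotNone(CodeforcesTask326ASolution())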
File: /code/process_results/2020/05/7_8_finetune_sparse_facto_grid_lr.py (repo: lucgiffon/psm-nets, MIT, Python, 21,794 bytes)

from collections import defaultdict
import keras.backend as K
import pickle
import pathlib
import pandas as pd
import scipy.special
import scipy.stats
from keras.models import Model
import gc
import palmnet.hunt
from palmnet.core.faustizer import Faustizer
from palmnet.core.layer_replacer_faust import LayerReplacerFaust
from palmnet.core.layer_replacer_palm import LayerReplacerPalm
from palmnet.data import param_training, image_data_generator_cifar_svhn, image_data_generator_mnist
from palmnet.experiments.utils import get_line_of_interest, ParameterManager
from palmnet.utils import get_sparsity_pattern, get_nb_learnable_weights, get_nb_learnable_weights_from_model
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import numpy as np
import logging
from palmnet.core import palminizable
from palmnet.core.palminizer import Palminizer
palminizable.Palminizer = Palminizer
import sys
sys.modules["palmnet.core.palminize"] = palminizable
from skluc.utils import logger, log_memory_usage
import keras
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
logger.setLevel(logging.DEBUG)

def get_singular_values_info(matrix):
    U, S, V = np.linalg.svd(matrix)
    mean_sv = np.mean(S)
    softmax_S = scipy.special.softmax(S)
    entropy_S = scipy.stats.entropy(softmax_S)
    entropy_sv = entropy_S
    nb_sv = len(S)
    entropy_sv_normalized = entropy_S / scipy.stats.entropy(scipy.special.softmax(np.ones(len(S))))
    percent_sv_above_mean = np.sum(S > mean_sv) / len(S)
    return entropy_sv, nb_sv, entropy_sv_normalized, percent_sv_above_mean
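
# The normalization above divides by the entropy of a uniform spectrum, so
# the ratio lies in (0, 1] and equals 1 when all singular values are equal;
# for example:
#
#     S = np.ones(8)
#     h = scipy.stats.entropy(scipy.special.softmax(S))
#     h_uniform = scipy.stats.entropy(scipy.special.softmax(np.ones(len(S))))
#     assert abs(h / h_uniform - 1.0) < 1e-12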

def get_df_from_expe_path(expe_path):
    src_dir = root_source_dir / expe_path
    df = get_df(src_dir)
    df = df.assign(results_dir=[str(src_dir.absolute())] * len(df))
    df = df.rename(columns={"--tol": "--delta-threshold"})
    return df


columns_not_to_num = ['hash', 'output_file_csvcbprinter', "--use-clr",
                      "--input-dir", "input_model_path", "output_file_csvcvprinter",
                      "output_file_finishedprinter", "output_file_layerbylayer",
                      "output_file_modelprinter", "output_file_notfinishedprinter",
                      "output_file_resprinter", "output_file_tensorboardprinter", "results_dir"]


def cast_to_num(df):
    for col in df.columns.difference(columns_not_to_num):
        if col in df.columns.values:
            df.loc[:, col] = df.loc[:, col].apply(pd.to_numeric, errors='coerce')
    return df
if __name__ == "__main__":
root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/")
expe_path = "2020/05/7_8_finetune_sparse_facto_not_log_all_grid_lr"
lst_path_finetune = [
"2020/05/7_8_finetune_sparse_facto_not_log_all_grid_lr",
"2020/05/7_8_finetune_sparse_facto_not_log_all_grid_lr_only_mask",
"2020/05/11_12_finetune_sparse_facto_resnet_grid_lr",
"2020/05/11_12_finetune_sparse_facto_not_log_resnet_not_only_mask_grid_lr",
"2020/07/11_12_finetune_fix_only_mask_grid_lr"
]
lst_path_compression = [
"2020/05/3_4_compression_palm_not_log_all",
]
df_finetune = pd.concat(list(map(get_df_from_expe_path, lst_path_finetune)))
# df_finetune = get_df_from_expe_path(lst_path_finetune[0])
df_finetune = df_finetune.dropna(subset=["failure"])
df_finetune = df_finetune[df_finetune["failure"] == False]
df_finetune = df_finetune.drop(columns="oar_id").drop_duplicates()
df_finetune = cast_to_num(df_finetune)
df_finetune = df_finetune[~df_finetune["test_accuracy_finetuned_model"].isnull()]
df_compression = pd.concat(list(map(get_df_from_expe_path, lst_path_compression)))
# df_compression = get_df_from_expe_path(lst_path_compression[0])
df_compression = cast_to_num(df_compression)
root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/processed/")
output_dir = root_output_dir / expe_path
output_dir.mkdir(parents=True, exist_ok=True)
dct_attributes = defaultdict(lambda: [])
dct_results_matrices = defaultdict(lambda: [])
length_df = len(df_finetune)
for idx, (_, row) in enumerate(df_finetune.iterrows()):
# if df_results_tmp is not None and row["hash"] in df_results_tmp["hash"].values:
# continue
if np.isnan(row["test_loss_finetuned_model"]):
continue
log_memory_usage("Start loop")
print("row {}/{}".format(idx, length_df))
dct_attributes["idx-expe"].append(idx)
dct_attributes["hash"].append(row["hash"])
# get corresponding row in the palminize results directory #
keys_of_interest = ['--cifar10',
'--cifar10-vgg19',
'--cifar100',
'--cifar100-vgg19',
'--delta-threshold',
'--hierarchical',
'--mnist',
'--mnist-lenet',
'--nb-iteration-palm',
'--sparsity-factor',
'--svhn',
'--svhn-vgg19',
'--test-data',
'--test-model',
"--nb-factor"
]
if row["--cifar100-resnet50"] or row["--cifar100-resnet20"]:
keys_of_interest.extend([
'--cifar100-resnet50',
'--cifar100-resnet20',
])
row_before_finetune = get_line_of_interest(df_compression, keys_of_interest, row).iloc[0]
# this is the row of results for the model before finetuning
############################################
# Global informations about the experiment #
############################################
if row["--cifar10"]:
dct_attributes["dataset"].append("cifar10")
elif row["--cifar100"]:
dct_attributes["dataset"].append("cifar100")
elif row["--mnist"]:
dct_attributes["dataset"].append("mnist")
elif row["--svhn"]:
dct_attributes["dataset"].append("svhn")
else:
raise ValueError("Unknown dataset")
if row["--cifar100-vgg19"] or row["--cifar10-vgg19"] or row["--svhn-vgg19"]:
dct_attributes["model"].append("vgg19")
elif row["--mnist-lenet"]:
dct_attributes["model"].append("lenet")
elif row["--mnist-500"]:
dct_attributes["model"].append("fc500")
elif row["--cifar100-resnet20"]:
dct_attributes["model"].append("resnet20")
elif row["--cifar100-resnet50"]:
dct_attributes["model"].append("resnet50")
elif row["--cifar100-resnet20-new"]:
dct_attributes["model"].append("resnet20")
elif row["--cifar100-resnet50-new"]:
dct_attributes["model"].append("resnet50")
else:
raise ValueError("Unknown model")
if row["faust"]:
dct_attributes["method"].append("faust")
elif row["palm"]:
dct_attributes["method"].append("pyqalm")
else:
raise NotImplementedError
# palm informations #
dct_attributes["delta-threshold"].append(float(row["--delta-threshold"]))
dct_attributes["hierarchical"].append(bool(row["--hierarchical"]))
dct_attributes["nb-factor"].append(int(row["--nb-factor"]) if not np.isnan(row["--nb-factor"]) else np.nan)
dct_attributes["nb-iteration-palm"].append(int(row["--nb-iteration-palm"]))
dct_attributes["sparsity-factor"].append(int(row["--sparsity-factor"]))
# finetuning informations
dct_attributes["use-clr"].append(row["--use-clr"]) # this must be first because used in other attributes
dct_attributes["only-mask"].append(bool(row["--only-mask"]))
dct_attributes["keep-last-layer"].append(bool(row["--keep-last-layer"]))
dct_attributes["keep-first-layer"].append(bool(row["--keep-first-layer"]))
dct_attributes["only-dense"].append(bool(row["--only-dense"]))
# beware of this line here because the params_optimizer may change between experiments
dct_attributes["epoch-step-size"].append(float(row["--epoch-step-size"]) if dct_attributes["use-clr"][-1] else np.nan)
dct_attributes["actual-batch-size"].append(int(row["actual-batch-size"]) if row["actual-batch-size"] is not None else None)
dct_attributes["actual-nb-epochs"].append(int(row["actual-nb-epochs"]) if row["actual-nb-epochs"] is not None else None)
dct_attributes["actual-min-lr"].append(float(row["actual-min-lr"]) if row["actual-min-lr"] is not None else None)
dct_attributes["actual-max-lr"].append(float(row["actual-max-lr"]) if row["actual-max-lr"] is not None else None)
dct_attributes["actual-lr"].append(float(row["actual-lr"]) if row["actual-lr"] is not None else None)
# score informations
dct_attributes["base-model-score"].append(float(row["test_accuracy_base_model"]))
dct_attributes["before-finetune-score"].append(float(row["test_accuracy_compressed_model"]))
dct_attributes["finetuned-score"].append(float(row["test_accuracy_finetuned_model"]))
dct_attributes["base-model-loss"].append(float(row["test_loss_base_model"]))
dct_attributes["before-finetune-loss"].append(float(row["test_loss_compressed_model"]))
dct_attributes["finetuned-loss"].append(float(row["test_loss_finetuned_model"]))
dct_attributes["finetuned-score-val"].append(float(row["val_accuracy_finetuned_model"]))
# store path informations
path_model_compressed = pathlib.Path(row_before_finetune["results_dir"]) / row_before_finetune["output_file_modelprinter"]
path_history = pathlib.Path(row["results_dir"]) / row["output_file_csvcbprinter"]
dct_attributes["path-learning-history"].append(path_history)
dct_attributes["path-model-compressed"].append(path_model_compressed)
##############################
# Layer by Layer information #
##############################
nb_param_dense_base = 0
nb_param_dense_compressed = 0
nb_param_conv_base = 0
nb_param_conv_compressed = 0
if type(row["output_file_layerbylayer"]) == str:
dct_attributes["nb-param-base-total"].append(int(row["base_model_nb_param"]))
dct_attributes["nb-param-compressed-total"].append(int(row["new_model_nb_param"]))
dct_attributes["param-compression-rate-total"].append(row["base_model_nb_param"]/row["new_model_nb_param"])
path_layer_by_layer = pathlib.Path(row["results_dir"]) / row["output_file_layerbylayer"]
df_csv_layerbylayer = pd.read_csv(str(path_layer_by_layer))
for idx_row_layer, row_layer in df_csv_layerbylayer.iterrows():
dct_results_matrices["idx-expe"].append(idx)
dct_results_matrices["model"].append(dct_attributes["model"][-1])
layer_name_compressed = row_layer["layer-name-compressed"]
is_dense = "sparse_factorisation_dense" in layer_name_compressed
dct_results_matrices["layer-name-base"].append(row_layer["layer-name-base"])
dct_results_matrices["layer-name-compressed"].append(row_layer["layer-name-compressed"])
dct_results_matrices["idx-layer"].append(row_layer["idx-layer"])
dct_results_matrices["data"].append(dct_attributes["dataset"][-1])
dct_results_matrices["keep-last-layer"].append(dct_attributes["keep-last-layer"][-1])
dct_results_matrices["use-clr"].append(dct_attributes["use-clr"][-1])
dct_results_matrices["diff-approx"].append(row_layer["diff-approx"])
# get nb val base layer and comrpessed layer
dct_results_matrices["nb-non-zero-base"].append(row_layer["nb-non-zero-base"])
dct_results_matrices["nb-non-zero-compressed"].append(row_layer["nb-non-zero-compressed"])
dct_results_matrices["nb-non-zero-compression-rate"].append(row_layer["nb-non-zero-compression-rate"])
if is_dense:
nb_param_dense_base += row_layer["nb-non-zero-base"]
nb_param_dense_compressed += row_layer["nb-non-zero-compressed"]
else:
nb_param_conv_base += row_layer["nb-non-zero-base"]
nb_param_conv_compressed += row_layer["nb-non-zero-compressed"]
# get palm setting options
dct_results_matrices["nb-factor-param"].append(dct_attributes["nb-factor"][-1])
# dct_results_matrices["nb-factor-actual"].append(len(sparsity_patterns))
dct_results_matrices["sparsity-factor"].append(dct_attributes["sparsity-factor"][-1])
dct_results_matrices["hierarchical"].append(dct_attributes["hierarchical"][-1])
else:
# continue
palmnet.hunt.show_most_common_types(limit=20)
log_memory_usage("Before pickle")
layer_replacer = LayerReplacerFaust(only_mask=False, keep_last_layer=dct_attributes["keep-last-layer"][-1], path_checkpoint_file=path_model_compressed, sparse_factorizer=Faustizer())
layer_replacer.load_dct_name_compression()
log_memory_usage("After pickle")
paraman = ParameterManager(row.to_dict())
base_model = paraman.get_model()
palmnet.hunt.show_most_common_types(limit=20)
compressed_model = layer_replacer.transform(base_model)
palmnet.hunt.show_most_common_types(limit=20)
log_memory_usage("After transform")
if len(base_model.layers) < len(compressed_model.layers):
base_model = Model(inputs=base_model.inputs, outputs=base_model.outputs)
assert len(base_model.layers) == len(compressed_model.layers)
# model complexity informations obtained from the reconstructed model
nb_learnable_weights_base_model = get_nb_learnable_weights_from_model(base_model)
nb_learnable_weights_compressed_model = get_nb_learnable_weights_from_model(compressed_model)
dct_attributes["nb-param-base-total"].append(int(nb_learnable_weights_base_model))
dct_attributes["nb-param-compressed-total"].append(int(nb_learnable_weights_compressed_model))
dct_attributes["param-compression-rate-total"].append(nb_learnable_weights_base_model/nb_learnable_weights_compressed_model)
dct_name_facto = None
dct_name_facto = layer_replacer.dct_name_compression
for idx_layer, base_layer in enumerate(base_model.layers):
log_memory_usage("Start secondary loop")
sparse_factorization = dct_name_facto.get(base_layer.name, (None, None))
if sparse_factorization != (None, None) and sparse_factorization != None:
print(base_layer.name)
compressed_layer = None
compressed_layer = compressed_model.layers[idx_layer]
# get informations to identify the layer (and do cross references)
dct_results_matrices["idx-expe"].append(idx)
dct_results_matrices["model"].append(dct_attributes["model"][-1])
dct_results_matrices["layer-name-base"].append(base_layer.name)
layer_name_compressed = compressed_layer.name
is_dense = "sparse_factorisation_dense" in layer_name_compressed
dct_results_matrices["layer-name-compressed"].append(compressed_layer.name)
dct_results_matrices["idx-layer"].append(idx_layer)
dct_results_matrices["data"].append(dct_attributes["dataset"][-1])
dct_results_matrices["keep-last-layer"].append(dct_attributes["keep-last-layer"][-1])
dct_results_matrices["use-clr"].append(dct_attributes["use-clr"][-1])
# get sparse factorization
scaling = sparse_factorization['lambda']
factors = Faustizer.get_factors_from_op_sparsefacto(sparse_factorization['sparse_factors'])
sparsity_patterns = [get_sparsity_pattern(w) for w in factors]
factor_data = factors
# rebuild full matrix to allow comparisons
reconstructed_matrix = np.linalg.multi_dot(factors) * scaling
base_matrix = np.reshape(base_layer.get_weights()[0], reconstructed_matrix.shape)
# normalized approximation errors
diff = np.linalg.norm(base_matrix - reconstructed_matrix) / np.linalg.norm(base_matrix)
dct_results_matrices["diff-approx"].append(diff)
# # measures "singular values" #
# # base matrix
# base_entropy_sv, base_nb_sv, base_entropy_sv_normalized, base_percent_sv_above_mean = get_singular_values_info(base_matrix)
# dct_results_matrices["entropy-base-sv"].append(base_entropy_sv)
# dct_results_matrices["nb-sv-base"].append(base_nb_sv)
# dct_results_matrices["entropy-base-sv-normalized"].append(base_entropy_sv_normalized)
# dct_results_matrices["percent-sv-base-above-mean"].append(base_percent_sv_above_mean)
# # reconstructed matrix
# recons_entropy_sv, recons_nb_sv, recons_entropy_sv_normalized, recons_percent_sv_above_mean = get_singular_values_info(reconstructed_matrix)
# dct_results_matrices["entropy-recons-sv"].append(recons_entropy_sv)
# dct_results_matrices["nb-sv-recons"].append(recons_nb_sv)
# dct_results_matrices["entropy-recons-sv-normalized"].append(recons_entropy_sv_normalized)
# dct_results_matrices["percent-sv-recons-above-mean"].append(recons_percent_sv_above_mean)
# complexity analysis #
# get nb val of the full reconstructed matrix
sparsity_pattern_reconstructed = get_sparsity_pattern(reconstructed_matrix)
nb_non_zero = int(np.sum(sparsity_pattern_reconstructed))
size_bias = len(base_layer.get_weights()[-1]) if base_layer.use_bias else 0
# dct_results_matrices["nb-non-zero-reconstructed"].append(nb_non_zero + size_bias)
# get nb val base layer and comrpessed layers
nb_weights_base_layer = get_nb_learnable_weights(base_layer)
dct_results_matrices["nb-non-zero-base"].append(nb_weights_base_layer)
nb_weights_compressed_layer = get_nb_learnable_weights(compressed_layer)
dct_results_matrices["nb-non-zero-compressed"].append(nb_weights_compressed_layer)
dct_results_matrices["nb-non-zero-compression-rate"].append(nb_weights_base_layer/nb_weights_compressed_layer)
if is_dense:
nb_param_dense_base += nb_weights_base_layer
nb_param_dense_compressed += nb_weights_compressed_layer
else:
nb_param_conv_base += nb_weights_base_layer
nb_param_conv_compressed += nb_weights_compressed_layer
# get palm setting options
dct_results_matrices["nb-factor-param"].append(dct_attributes["nb-factor"][-1])
# dct_results_matrices["nb-factor-actual"].append(len(sparsity_patterns))
dct_results_matrices["sparsity-factor"].append(dct_attributes["sparsity-factor"][-1])
dct_results_matrices["hierarchical"].append(dct_attributes["hierarchical"][-1])
gc.collect()
palmnet.hunt.show_most_common_types(limit=20)
log_memory_usage("Before dels")
del dct_name_facto
del base_model
del compressed_model
del base_layer
del compressed_layer
del sparse_factorization
K.clear_session()
gc.collect()
log_memory_usage("After dels")
palmnet.hunt.show_most_common_types(limit=20)
dct_attributes["nb-param-base-dense"].append(int(nb_param_dense_base))
dct_attributes["nb-param-base-conv"].append(int(nb_param_conv_base))
dct_attributes["nb-param-compressed-dense"].append(int(nb_param_dense_compressed))
dct_attributes["nb-param-compressed-conv"].append(int(nb_param_conv_compressed))
dct_attributes["nb-param-compression-rate-dense"].append(dct_attributes["nb-param-base-dense"][-1] / dct_attributes["nb-param-compressed-dense"][-1])
try:
dct_attributes["nb-param-compression-rate-conv"].append(dct_attributes["nb-param-base-conv"][-1] / dct_attributes["nb-param-compressed-conv"][-1])
except ZeroDivisionError:
dct_attributes["nb-param-compression-rate-conv"].append(np.nan)
df_results = pd.DataFrame.from_dict(dct_attributes)
# if df_results_tmp is not None:
# df_results = pd.concat([df_results, df_results_tmp])
df_results.to_csv(output_dir / "results.csv")
df_results_layers = pd.DataFrame.from_dict(dct_results_matrices)
# if df_results_layers_tmp is not None:
# df_results_layers = pd.concat([df_results_layers, df_results_layers_tmp])
df_results_layers.to_csv(output_dir / "results_layers.csv")
| [
"[email protected]"
]
| |
30965eb40de98acf331d58db74af0f8f602f227d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03085/s149035990.py | 1c1c597445598c94f69ba81db215767cac8dae30 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | # A - Double Helix
# A: adenine  T: thymine  G: guanine  C: cytosine
# complementary pairs: A-T, G-C
# read the base from standard input
base = input()
# print(base)
# branch on the base and assign its complement to answer
if base == 'A':
# print('T')
answer = 'T'
elif base == 'T':
# print('A')
answer = 'A'
elif base == 'G':
# print('C')
answer = 'C'
elif base == 'C':
# print('G')
answer = 'G'
# print the result
print(answer)
| [
"[email protected]"
]
| |
72385aef0f88fb44670c62fe09108881b5ca1cdd | a934a51f68592785a7aed1eeb31e5be45dd087d3 | /Learning/Network_process_WA/Day1/2020_Jul23/subprocess_old/run_ls01.py | ba2d553ef730e9191baf52a2201f2e782ccafa17 | []
| no_license | nsshayan/Python | 9bf0dcb9a6890419873428a2dde7a802e715be2b | 0cf5420eecac3505071326c90b28bd942205ea54 | refs/heads/master | 2021-06-03T18:41:06.203334 | 2020-09-28T07:28:48 | 2020-09-28T07:28:48 | 35,269,825 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 215 | py | from subprocess import Popen
#p = Popen("echo $PATH", shell=True)
with open("ls.out", "w") as lsout:
p = Popen(["ls", "-l", "/usr"], stdout=lsout)
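    # the child's stdout is redirected into ls.out via the stdout argument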
ret = p.wait()
print("ls exited with code =", ret)
| [
"[email protected]"
]
| |
23721ad4bfebf2e752102dfd7da2d6e58554374c | 6a0ae86bca2d2ece6c92efd5594c0e3b1777ead7 | /EDBRCommon/python/datasets/test_RSGZZ600_cff.py | aa4a12968a86fc2923e54c5b230e1e8a80ccf6d0 | []
| no_license | wangmengmeng/ExoDiBosonResonances | c4b5d277f744e1b1986df9317ac60b46d202a29f | bf5d2e79f59ad25c7a11e7f97552e2bf6a283428 | refs/heads/master | 2016-09-06T14:54:53.245508 | 2014-06-05T15:02:37 | 2014-06-05T15:02:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,483 | py | import FWCore.ParameterSet.Config as cms
readFiles = cms.untracked.vstring()
source = cms.Source("PoolSource",
noEventSort = cms.untracked.bool(True),
duplicateCheckMode = cms.untracked.string("noDuplicateCheck"),
fileNames = readFiles
)
readFiles.extend([
## '/store/cmst3/user/bonato//patTuple/2012/EXOVVtest/newPatTuple_ZZ_1000_c1.root'
# '/store/cmst3/user/bonato//patTuple/2012/EXOVVtest/patExoWW_M600_10_1_KPf.root'
# '/store/cmst3/user/bonato//patTuple/2012/EXOVVtest/patZZ_M1000_5k_20121212.root'
#'file:/afs/cern.ch/user/b/bonato/scratch0/PhysAnalysis/EXOVV_2012/CMGTools/CMSSW_5_3_9/src/ExoDiBosonResonances/PATtupleProduction/python/patTuple.v2.root'
# 'file:/afs/cern.ch/user/b/bonato/scratch0/PhysAnalysis/EXOVV_2012/CMGTools/CMSSW_5_3_9/src/ExoDiBosonResonances/PATtupleProduction/python/patTuple_XWW.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/ExoDiBosonResonances/PATtupleProduction/python/pattuple_mwp1200_old.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/ExoDiBosonResonances/PATtupleProduction/python/pattuple_mwp1200_new.root'
# 'root://xrootd.unl.edu//store/user/mwang/EXOWH_Wprime_M1000_GENSIM_V2/EXOWH_Wprime_M1000_PATtuple_cc_1204/69a9fa67eebd7bf7213e8a26a2d59023/pattuple_mwp1000_cc_1_1_5pv.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/pattuple_mwp1000cc_new.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/pattuple_mwp1000gg_new.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/pattuple_mwp1000bb_new.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/ExoDiBosonResonances/PATtupleProduction/python/pattuple_mwp1200_new.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/pattuple_M1000_test.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/SingleMu__Run2012A_test.root'
# 'file:/afs/cern.ch/work/m/mwang/public/EXO/1128/CMGTools/CMSSW_5_3_9/src/ExoDiBosonResonances/EDBRCommon/prod/DY.root'
# 'root://eoscms//eos/cms/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_DYToLLBinsPtZ_MADGRAPH_20140210_150636/mwang/DYJetsToLL_PtZ-100_TuneZ2star_8TeV_ext-madgraph-tarball/EDBR_PATtuple_edbr_wh_20140210/6dd5c34efa97fc5295a711db48f1622c/DYJetsToLL_PtZ-100_TuneZ2star_8TeV_ext-madgraph-tarball__Summer12_DR53X-PU_S10_START53_V7C-v1__AODSIM_1031_1_JwO.root'
'file:/afs/cern.ch/work/m/mwang/public/ForJennifer/EXOWH_Wprime_M1000_GENSIM_V2__mwang-EXOWH_Wprime_M1000_AODSIM_V2-2c74483358b1f8805e5601fc325d256c__USER_10_2_xXb.root'
# 'root://eoscms//eos/cms/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_SingleElectron_Run2012A-22Jan2013-v1/mwang/c2d529e1c78e50623ca40825abf53f99/SingleElectron__Run2012A-22Jan2013-v1__AOD_114_2_fIY.root'
# '/store/cmst3/group/exovv/mwang/EDBR_PATtuple_edbr_wh_20140210_Summer12MC_DYToLLBinsPtZ_MADGRAPH_20140210_150636/mwang/DYJetsToLL_PtZ-00_TuneZ2star_8TeV_ext-madgraph-tarball/EDBR_PATtuple_edbr_wh_20140210/6dd5c34efa97fc5295a711db48f1622c/DYJetsToLL_PtZ-100_TuneZ2star_8TeV_ext-madgraph-tarball__Summer12_DR53X-PU_S10_START53_V7C-v1__AODSIM_1181_1_VHx.root'
])
| [
"[email protected]"
]
| |
a14c03bc628896e88a3a715353f4b5c93d9778c3 | 98e1716c1c3d071b2fedef0ac029eb410f55762c | /part13-introduction-data-visualization/No07-Using-legend.py | bd1fef91573279e954aa0db684577e0a61040372 | []
| no_license | iamashu/Data-Camp-exercise-PythonTrack | 564531bcf1dff119949cbb75e1fd63d89cb2779f | c72a4e806494f0e263ced9594597dc8882c2131c | refs/heads/master | 2020-07-22T00:23:12.024386 | 2019-04-12T09:24:42 | 2019-04-12T09:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | #Using legend()
'''
Legends are useful for distinguishing between multiple datasets displayed on common axes. The relevant data are created using specific line colors or markers in various plot commands. Using the keyword argument label in the plotting function associates a string to use in a legend.
For example, here, you will plot enrollment of women in the Physical Sciences and in Computer Science over time. You can label each curve by passing a label argument to the plotting call, and request a legend using plt.legend(). Specifying the keyword argument loc determines where the legend will be placed.
Instructions
- Modify the plot command provided that draws the enrollment of women in Computer Science over time so that the curve is labelled 'Computer Science' in the legend.
- Modify the plot command provided that draws the enrollment of women in the Physical Sciences over time so that the curve is labelled 'Physical Sciences' in the legend.
- Add a legend at the lower center (i.e., loc='lower center').
'''
# Code
import matplotlib.pyplot as plt
# Note: the year, computer_science and physical_sciences arrays are assumed
# to be preloaded by the exercise environment.
# Specify the label 'Computer Science'
plt.plot(year, computer_science, color='red', label='Computer Science')
# Specify the label 'Physical Sciences'
plt.plot(year, physical_sciences, color='blue', label='Physical Sciences')
# Add a legend at the lower center
plt.legend(loc='lower center')
# Add axis labels and title
plt.xlabel('Year')
plt.ylabel('Enrollment (%)')
plt.title('Undergraduate enrollment of women')
plt.show()
| [
"[email protected]"
]
| |
328ecc8c6a133314695a3f5e71fe57df6876cc9c | bdb206758815fa598285e05c23d81829f3ad60a9 | /addons/at2166/controllers/controllers.py | 4e907a688dc832b0d2a90c58e410c59f80a51a82 | []
| no_license | kulius/odoo10_test | 75a9645fbd64ba5fd6901fb441f2e7141f610032 | 5a01107e2337fd0bbe35d87d53a0fe12eff7c59e | refs/heads/master | 2021-07-26T15:05:58.074345 | 2017-11-08T09:04:11 | 2017-11-08T09:04:11 | 109,943,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | # -*- coding: utf-8 -*-
from odoo import http
# class At2166(http.Controller):
# @http.route('/at2166/at2166/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/at2166/at2166/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('at2166.listing', {
# 'root': '/at2166/at2166',
# 'objects': http.request.env['at2166.at2166'].search([]),
# })
# @http.route('/at2166/at2166/objects/<model("at2166.at2166"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('at2166.object', {
# 'object': obj
# }) | [
"[email protected]"
]
| |
99f2714c3fba9228c05928fad3b4c365ac9aa7b1 | 356151747d2a6c65429e48592385166ab48c334c | /backend/customer/threads/order_now/th_get_menu.py | 5e181b84ea83adbffe87077606958d72b475afed | []
| no_license | therealrahulsahu/se_project | c82b2d9d467decd30a24388f66427c7805c23252 | c9f9fd5594191ab7dce0504ca0ab3025aa26a0c1 | refs/heads/master | 2020-06-25T02:51:30.355677 | 2020-04-20T13:01:36 | 2020-04-20T13:01:36 | 199,175,627 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,666 | py | from PyQt5.QtCore import QThread, pyqtSignal
class ThreadGetMenu(QThread):
signal = pyqtSignal('PyQt_PyObject')
def __init__(self, parent_class):
super().__init__()
self.parent_class = parent_class
def run(self):
if self.check_for_veg():
food_query = {
'veg': True,
'region': self.check_for_region(),
'type': self.check_for_type(),
'available': True
}
else:
food_query = {
'region': self.check_for_region(),
'type': self.check_for_type(),
'available': True
}
myc = self.parent_class.MW.DB.food
from pymongo.errors import AutoReconnect
from errors import FoodNotFoundError
try:
data_list = list(myc.find(food_query, {'_id': 1, 'name': 1, 'price': 1}))
if data_list:
self.parent_class.searched_food_list = data_list
self.signal.emit(True)
else:
raise FoodNotFoundError
except FoodNotFoundError as ob:
self.parent_class.MW.mess(str(ob))
except AutoReconnect:
self.parent_class.MW.mess('-->> Network Error <<--')
finally:
self.parent_class.curr_wid.bt_get.setEnabled(True)
def check_for_veg(self):
return self.parent_class.curr_wid.rbt_veg.isChecked()
def check_for_region(self):
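        # map the selected radio button to the short region code stored in
        # the food collection (codes inferred from the radio-button names)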
if self.parent_class.curr_wid.rbt_north_ind.isChecked():
return 'nid'
elif self.parent_class.curr_wid.rbt_italian.isChecked():
return 'ita'
elif self.parent_class.curr_wid.rbt_south_ind.isChecked():
return 'sid'
elif self.parent_class.curr_wid.rbt_conti.isChecked():
return 'conti'
elif self.parent_class.curr_wid.rbt_thai.isChecked():
return 'thi'
elif self.parent_class.curr_wid.rbt_china.isChecked():
return 'chi'
elif self.parent_class.curr_wid.rbt_rajas.isChecked():
return 'raj'
elif self.parent_class.curr_wid.rbt_none.isChecked():
return 'none'
def check_for_type(self):
if self.parent_class.curr_wid.rbt_starter.isChecked():
return 'sta'
elif self.parent_class.curr_wid.rbt_main.isChecked():
return 'mcs'
elif self.parent_class.curr_wid.rbt_refresh.isChecked():
return 'ref'
elif self.parent_class.curr_wid.rbt_dessert.isChecked():
return 'des'
elif self.parent_class.curr_wid.rbt_bread.isChecked():
return 'bre'
| [
"[email protected]"
]
| |
e46e39a01e13cb2eea5a6f5add4fb61accae3bf1 | c99be9a7a55c6dc3dade46147f116ee6729a19d1 | /tikzplotlib/__about__.py | 4d3b2067e4529f6a610d20626f3fcbed193b58ca | [
"MIT"
]
| permissive | theRealSuperMario/tikzplotlib | 3001cbe11856b1e7d87aa308c0ef99bbd28d1bec | 3c1e08e78cb87ecf4b475f506244813bf99ac705 | refs/heads/master | 2020-12-11T09:36:37.399842 | 2020-11-01T10:27:21 | 2020-11-01T10:27:21 | 233,809,790 | 2 | 0 | MIT | 2020-01-14T09:54:53 | 2020-01-14T09:54:53 | null | UTF-8 | Python | false | false | 221 | py | try:
# Python 3.8
from importlib import metadata
except ImportError:
import importlib_metadata as metadata
try:
__version__ = metadata.version("tikzplotlib")
except Exception:
__version__ = "unknown"
| [
"[email protected]"
]
| |
5f5a0e831a68c5ef684d354ca570acf953792cea | f75ec2c20c3208350d310038a2cd0a67253b44df | /src/swagger_codegen/api/response_deserializer.py | 5abc7163627b96210e3b445db6500c9493171408 | []
| no_license | vichooz/swagger_codegen | e53f59f3cd2c080157863698f932a606705db4e4 | 8238356075eea4218b2e6a645c7ea2b8826b1044 | refs/heads/master | 2022-08-03T04:32:49.291426 | 2020-05-27T06:09:28 | 2020-05-27T06:09:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 850 | py | import abc
from typing import Any
from typing import Optional
import pydantic
from swagger_codegen.api.types import ResponseType
class ResponseDeserializer(abc.ABC):
@abc.abstractmethod
def deserialize(self, deserialize_to: ResponseType, model_body):
pass
class DefaultResponseDeserializer(ResponseDeserializer):
def deserialize(self, deserialize_to: ResponseType, model_body) -> Optional[Any]:
if deserialize_to is None:
return None
if model_body is None:
return None
class Config(pydantic.BaseConfig):
arbitrary_types_allowed = True
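        # create_model builds a throwaway pydantic model whose single __root__
        # field is typed as deserialize_to; instantiating it below validates
        # and coerces model_body into that type in one step.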
pydantic_validator_model = pydantic.create_model(
"PydanticValidatorModel", __root__=(deserialize_to, ...), __config__=Config
)
return pydantic_validator_model(__root__=model_body).__root__
| [
"[email protected]"
]
| |
4a67d8771aca07434a51aa7be4bb84f2c069a433 | 34745a8d54fa7e3d9e4237415eb52e507508ad79 | /Python Fundamentals/Final exam/02_emoji_detector.py | 2561bef627873327e10a68ef7de9312ae81415d8 | []
| no_license | DilyanTsenkov/SoftUni-Software-Engineering | 50476af0dc88b267d72c56fa87eeb88d841164b2 | fe446e3a50a00bb2e48d71ab8f783e0a4a406094 | refs/heads/main | 2023-08-12T18:18:42.144210 | 2021-09-25T11:10:38 | 2021-09-25T11:10:38 | 317,235,419 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | import re
def threshold(input_string):
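    # the cool threshold is the product of every digit that appears in the text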
c_threshold = 1
digit_regex = r"[0-9]"
digits = re.findall(digit_regex, input_string)
for digit in digits:
c_threshold *= int(digit)
return c_threshold
def emoji_checker(input_string, cool):
all_of_emojis = []
cool_of_emojis = []
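    # an emoji is a capitalized word of three or more letters wrapped in
    # matching :: or ** delimiters; (?P=symbols) keeps the pair consistent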
emoji_regex = r"(?P<symbols>\:\:|\*\*)(?P<emoji>[A-Z][a-z][a-z]+)(?P=symbols)"
emojis = re.finditer(emoji_regex, input_string)
for data in emojis:
coolness = 0
d = data.groupdict()
for char in d["emoji"]:
coolness += ord(char)
emoji_found = d["symbols"] + d["emoji"] + d["symbols"]
all_of_emojis.append(emoji_found)
if coolness > cool:
cool_of_emojis.append(emoji_found)
return all_of_emojis, cool_of_emojis
string = input()
cool_threshold = threshold(string)
all_emojis, cool_emojis = emoji_checker(string, cool_threshold)
print(f"Cool threshold: {cool_threshold}")
print(f"{len(all_emojis)} emojis found in the text. The cool ones are:")
for cool_emoji in cool_emojis:
    print(cool_emoji)
| [
"[email protected]"
]
| |
36859f62160f94e4c4d427461f1f1f7aaa00bab4 | 5efc7ab8a298a026bad44596e18de104985a4b71 | /fn_wiki/tests/test_funct_fn_wiki_create_update.py | 9aa1fce69272eadbcde79a40b9ddf69ec5e91908 | [
"MIT"
]
| permissive | RomanDerkach/resilient-community-apps | 4cf0abe443411582e9f57491364ecc2d844ba30d | 1f60fb100e6a697df7b901d7a4aad707fea3dfee | refs/heads/master | 2023-03-11T21:56:18.307942 | 2021-03-02T16:09:33 | 2021-03-02T16:09:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,815 | py | # -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
PACKAGE_NAME = "fn_wiki"
FUNCTION_NAME = "fn_wiki_create_update"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_fn_wiki_create_update_function(circuits, function_params, timeout=5):
# Create the submitTestFunction event
evt = SubmitTestFunction("fn_wiki_create_update", function_params)
# Fire a message to the function
circuits.manager.fire(evt)
# circuits will fire an "exception" event if an exception is raised in the FunctionComponent
# return this exception if it is raised
exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
if exception_event is not False:
exception = exception_event.args[1]
raise exception
# else return the FunctionComponent's results
else:
event = circuits.watcher.wait("fn_wiki_create_update_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestFnWikiCreateUpdate:
""" Tests for the fn_wiki_create_update function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
mock_fail_path = {
"wiki_path": None,
"wiki_body": "sample text",
"wiki_create_if_missing": False
}
mock_fail_page_not_found = {
"wiki_path": "not found",
"wiki_body": "sample text",
"wiki_create_if_missing": False,
}
mock_fail_parent_not_found = {
"wiki_path": "parent not found/new page",
"wiki_body": "sample text",
"wiki_create_if_missing": False
}
@pytest.mark.parametrize("mock_inputs, expected_results", [
(mock_fail_path, None),
(mock_fail_page_not_found, None),
(mock_fail_parent_not_found, None),
])
def test_fail_update(self, circuits_app, mock_inputs, expected_results):
""" Test calling with sample values for the parameters """
with pytest.raises(ValueError):
results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
assert(results['success'] == False)
assert(results['reason'])
mock_success_title = {
"wiki_path": "ΣΤ",
"wiki_body": "ΣΤ",
"wiki_create_if_missing": True
}
mock_success_w_parent_title = {
"wiki_path": "ΣΤ3/new3",
"wiki_body": "new3",
"wiki_create_if_missing": True
}
@pytest.mark.parametrize("mock_inputs, expected_results", [
(mock_success_title, None),
(mock_success_w_parent_title, None),
])
def test_create_success(self, circuits_app, mock_inputs, expected_results):
""" Test calling with sample values for the parameters """
results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
assert(results['success'])
mock_success_update_title = {
"wiki_path": "parent1/json2",
"wiki_body": "new3 ΣΤ3",
"wiki_create_if_missing": False
}
@pytest.mark.parametrize("mock_inputs, expected_results", [
(mock_success_update_title, None)
])
def test_update_success(self, circuits_app, mock_inputs, expected_results):
""" Test calling with sample values for the parameters """
results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
assert(results['success'])
mock_success_update_parent_title = {
"wiki_path": "ΣΤ3/ΣΤ4",
"wiki_body": "ΣΤ4",
"wiki_create_if_missing": True
}
mock_success_update_parent_subparent = {
"wiki_path": "parent1/json2/ΣΤ5",
"wiki_body": "ΣΤ5",
"wiki_create_if_missing": True
}
@pytest.mark.parametrize("mock_inputs, expected_results", [
(mock_success_update_parent_title, None),
(mock_success_update_parent_subparent, None)
])
def test_update_parent_success(self, circuits_app, mock_inputs, expected_results):
""" Test calling with sample values for the parameters """
results = call_fn_wiki_create_update_function(circuits_app, mock_inputs)
assert(results['success'])
| [
"[email protected]"
]
| |
c8a8f30335dc23ab837f1ee123fbde87fd5009b9 | 49900ba50d4f6c979d6d433577828c8007973125 | /data_utils/ner.py | 597d37cdb973d60f1d05f4fd35b70b226eb1faac | []
| no_license | weizhenzhao/cs224d_nlp_problem_set2 | 9661414965a58b97113f828a47932c5b9d8411df | 302f0e53cdd88147a5c1727d06f0be18270d8a2a | refs/heads/master | 2021-10-22T18:22:31.063591 | 2019-03-12T14:03:36 | 2019-03-12T14:03:36 | 104,356,708 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 591 | py | ##
# Utility functions for NER assignment
# Assignment 2, part 1 for CS224D
##
from data_utils.utils import invert_dict
from numpy import *
def load_wv(vocabfile, wvfile):
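    """Load the word-vector matrix (one row per vocab word) and build
    word->index and index->word lookup dicts."""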
wv = loadtxt(wvfile, dtype=float)
with open(vocabfile) as fd:
words = [line.strip() for line in fd]
num_to_word = dict(enumerate(words))
word_to_num = invert_dict(num_to_word)
return wv, word_to_num, num_to_word
def save_predictions(y, filename):
"""Save predictions, one per line."""
with open(filename, 'w') as fd:
fd.write("\n".join(map(str, y)))
fd.write("\n") | [
"[email protected]"
]
| |
f225babc8403680d28288ebf49150e7c4d9c2893 | f8da830331428a8e1bbeadf23345f79f1750bd98 | /msgraph-cli-extensions/v1_0/usersactions_v1_0/azext_usersactions_v1_0/vendored_sdks/usersactions/operations/_user_onenote_notebook_section_group_section_page_parent_notebook_operations.py | 9cbe41f6e394adb19707b01f0c335867f7b93f53 | [
"MIT"
]
| permissive | ezkemboi/msgraph-cli | e023e1b7589461a738e42cbad691d9a0216b0779 | 2ceeb27acabf7cfa219c8a20238d8c7411b9e782 | refs/heads/main | 2023-02-12T13:45:03.402672 | 2021-01-07T11:33:54 | 2021-01-07T11:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,768 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class UserOnenoteNotebookSectionGroupSectionPageParentNotebookOperations(object):
"""UserOnenoteNotebookSectionGroupSectionPageParentNotebookOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~users_actions.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def copy_notebook(
self,
user_id, # type: str
notebook_id, # type: str
section_group_id, # type: str
onenote_section_id, # type: str
onenote_page_id, # type: str
group_id=None, # type: Optional[str]
rename_as=None, # type: Optional[str]
notebook_folder=None, # type: Optional[str]
site_collection_id=None, # type: Optional[str]
site_id=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.MicrosoftGraphOnenoteOperation"
"""Invoke action copyNotebook.
Invoke action copyNotebook.
:param user_id: key: id of user.
:type user_id: str
:param notebook_id: key: id of notebook.
:type notebook_id: str
:param section_group_id: key: id of sectionGroup.
:type section_group_id: str
:param onenote_section_id: key: id of onenoteSection.
:type onenote_section_id: str
:param onenote_page_id: key: id of onenotePage.
:type onenote_page_id: str
:param group_id:
:type group_id: str
:param rename_as:
:type rename_as: str
:param notebook_folder:
:type notebook_folder: str
:param site_collection_id:
:type site_collection_id: str
:param site_id:
:type site_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MicrosoftGraphOnenoteOperation, or the result of cls(response)
:rtype: ~users_actions.models.MicrosoftGraphOnenoteOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.MicrosoftGraphOnenoteOperation"]
error_map = {404: ResourceNotFoundError, 409: ResourceExistsError}
error_map.update(kwargs.pop('error_map', {}))
_body = models.PathsFm3Zd0UsersUserIdOnenoteNotebooksNotebookIdSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdParentnotebookMicrosoftGraphCopynotebookPostRequestbodyContentApplicationJsonSchema(group_id=group_id, rename_as=rename_as, notebook_folder=notebook_folder, site_collection_id=site_collection_id, site_id=site_id)
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.copy_notebook.metadata['url'] # type: ignore
path_format_arguments = {
'user-id': self._serialize.url("user_id", user_id, 'str'),
'notebook-id': self._serialize.url("notebook_id", notebook_id, 'str'),
'sectionGroup-id': self._serialize.url("section_group_id", section_group_id, 'str'),
'onenoteSection-id': self._serialize.url("onenote_section_id", onenote_section_id, 'str'),
'onenotePage-id': self._serialize.url("onenote_page_id", onenote_page_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
header_parameters['Accept'] = 'application/json'
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(_body, 'PathsFm3Zd0UsersUserIdOnenoteNotebooksNotebookIdSectiongroupsSectiongroupIdSectionsOnenotesectionIdPagesOnenotepageIdParentnotebookMicrosoftGraphCopynotebookPostRequestbodyContentApplicationJsonSchema')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.OdataError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('MicrosoftGraphOnenoteOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
copy_notebook.metadata = {'url': '/users/{user-id}/onenote/notebooks/{notebook-id}/sectionGroups/{sectionGroup-id}/sections/{onenoteSection-id}/pages/{onenotePage-id}/parentNotebook/microsoft.graph.copyNotebook'} # type: ignore
| [
"[email protected]"
]
| |
7722fb46c2c820d5c4afe95d9aea8e35da673d71 | dc399f13d79b4fe80734b4ff9a47e6daac046efb | /ControlFlow/IterItems().py | 98e735d55cc6fdd2a1e4737b4d7cea9a8af47272 | []
| no_license | brrbaral/pythonbasic | 22531a75c8fc5e36dd7e9c1dfae31c5809365206 | dc8fbddc7d3671ac14ee5f53db32f2d210f71de0 | refs/heads/master | 2020-07-11T03:12:54.691238 | 2019-08-26T08:41:51 | 2019-08-26T08:41:51 | 204,433,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 115 | py | d1={"Bishow":"Pokhara","shree":"hetauda"}
print("The key-value pairs are:")
for i,j in d1.items():
print(i,j) | [
"[email protected]"
]
| |
ca29d0db6649c0311170cc6c4b70e63bb1d627b5 | e7c70a02e61f6d4a97c5933f3550bca22afa6acb | /ros_ws/devel/lib/python2.7/dist-packages/final_lab/srv/_path.py | 5f36d5349329d6f7860095b20230cfa7a98e99cc | []
| no_license | amitf82/Final_Proj_Mobile_Robotics | 14cfe7b182df1294a873283c91688c8ca9526fee | 435a6c1562df030fc462fe1b0a84f968a27a2b85 | refs/heads/master | 2021-01-20T03:22:51.387095 | 2017-04-30T08:25:33 | 2017-04-30T08:25:33 | 89,532,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,227 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from final_lab/pathRequest.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import nav_msgs.msg
import std_msgs.msg
class pathRequest(genpy.Message):
_md5sum = "58d6f138c7de7ef47c75d4b7e5df5472"
_type = "final_lab/pathRequest"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
nav_msgs/Path path
================================================================================
MSG: nav_msgs/Path
#An array of poses that represents a Path for a robot to follow
Header header
geometry_msgs/PoseStamped[] poses
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: geometry_msgs/PoseStamped
# A Pose with reference coordinate frame and timestamp
Header header
Pose pose
================================================================================
MSG: geometry_msgs/Pose
# A representation of pose in free space, composed of postion and orientation.
Point position
Quaternion orientation
================================================================================
MSG: geometry_msgs/Point
# This contains the position of a point in free space
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
"""
__slots__ = ['path']
_slot_types = ['nav_msgs/Path']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
path
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(pathRequest, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.path is None:
self.path = nav_msgs.msg.Path()
else:
self.path = nav_msgs.msg.Path()
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
_x = self.path.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.path.poses)
buff.write(_struct_I.pack(length))
for val1 in self.path.poses:
_v1 = val1.header
buff.write(_struct_I.pack(_v1.seq))
_v2 = _v1.stamp
_x = _v2
buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
_x = _v1.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_v3 = val1.pose
_v4 = _v3.position
_x = _v4
buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
_v5 = _v3.orientation
_x = _v5
buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.path is None:
self.path = nav_msgs.msg.Path()
end = 0
_x = self
start = end
end += 12
(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.path.header.frame_id = str[start:end].decode('utf-8')
else:
self.path.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.path.poses = []
for i in range(0, length):
val1 = geometry_msgs.msg.PoseStamped()
_v6 = val1.header
start = end
end += 4
(_v6.seq,) = _struct_I.unpack(str[start:end])
_v7 = _v6.stamp
_x = _v7
start = end
end += 8
(_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v6.frame_id = str[start:end].decode('utf-8')
else:
_v6.frame_id = str[start:end]
_v8 = val1.pose
_v9 = _v8.position
_x = _v9
start = end
end += 24
(_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
_v10 = _v8.orientation
_x = _v10
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
self.path.poses.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs))
_x = self.path.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
length = len(self.path.poses)
buff.write(_struct_I.pack(length))
for val1 in self.path.poses:
_v11 = val1.header
buff.write(_struct_I.pack(_v11.seq))
_v12 = _v11.stamp
_x = _v12
buff.write(_struct_2I.pack(_x.secs, _x.nsecs))
_x = _v11.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_v13 = val1.pose
_v14 = _v13.position
_x = _v14
buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
_v15 = _v13.orientation
_x = _v15
buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.path is None:
self.path = nav_msgs.msg.Path()
end = 0
_x = self
start = end
end += 12
(_x.path.header.seq, _x.path.header.stamp.secs, _x.path.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.path.header.frame_id = str[start:end].decode('utf-8')
else:
self.path.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.path.poses = []
for i in range(0, length):
val1 = geometry_msgs.msg.PoseStamped()
_v16 = val1.header
start = end
end += 4
(_v16.seq,) = _struct_I.unpack(str[start:end])
_v17 = _v16.stamp
_x = _v17
start = end
end += 8
(_x.secs, _x.nsecs,) = _struct_2I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
_v16.frame_id = str[start:end].decode('utf-8')
else:
_v16.frame_id = str[start:end]
_v18 = val1.pose
_v19 = _v18.position
_x = _v19
start = end
end += 24
(_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
_v20 = _v18.orientation
_x = _v20
start = end
end += 32
(_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
self.path.poses.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_4d = struct.Struct("<4d")
_struct_3I = struct.Struct("<3I")
_struct_2I = struct.Struct("<2I")
_struct_3d = struct.Struct("<3d")
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from final_lab/pathResponse.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class pathResponse(genpy.Message):
_md5sum = "3a1255d4d998bd4d6585c64639b5ee9a"
_type = "final_lab/pathResponse"
_has_header = False #flag to mark the presence of a Header object
_full_text = """
bool status
"""
__slots__ = ['status']
_slot_types = ['bool']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(pathResponse, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = False
else:
self.status = False
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
buff.write(_struct_B.pack(self.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
start = end
end += 1
(self.status,) = _struct_B.unpack(str[start:end])
self.status = bool(self.status)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
buff.write(_struct_B.pack(self.status))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
start = end
end += 1
(self.status,) = _struct_B.unpack(str[start:end])
self.status = bool(self.status)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_B = struct.Struct("<B")
class path(object):
_type = 'final_lab/path'
_md5sum = '87fbad184f990f6671a31d6fd2678f60'
_request_class = pathRequest
_response_class = pathResponse
| [
"[email protected]"
]
| |
d5f376ce543ef920ad3197131b8bd756a745399c | 9130bdbd90b7a70ac4ae491ddd0d6564c1c733e0 | /venv/lib/python3.8/site-packages/future/moves/xmlrpc/server.py | ec5409cb2504d3f8fd228a1760fa053fa5767669 | []
| no_license | baruwaa12/Projects | 6ca92561fb440c63eb48c9d1114b3fc8fa43f593 | 0d9a7b833f24729095308332b28c1cde63e9414d | refs/heads/main | 2022-10-21T14:13:47.551218 | 2022-10-09T11:03:49 | 2022-10-09T11:03:49 | 160,078,601 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | /home/runner/.cache/pip/pool/11/20/d7/7695204cac9e1660834a7266069f3338d8c9925b1124ebf2e0eb5a00b7 | [
"[email protected]"
]
| |
8cbeef5ea8b3d7e0aa7655c31e01ef7d0da11446 | 8541f4118c6093c84e78d768285e7007ee5f6a6c | /apps/tax/migrations/0005_auto_20160306_1353.py | 8077c34962cafafda6e7398270db44898639bd2a | []
| no_license | iraycd/awecounting | c81a8ca6b7a4a942e63cf6b7d723f9883e57a107 | 388df4de63146e0a9a211afa522ec50e0f3df443 | refs/heads/master | 2021-01-15T23:30:27.439759 | 2016-03-16T10:34:40 | 2016-03-16T10:34:40 | 57,046,467 | 1 | 0 | null | 2016-04-25T14:03:40 | 2016-04-25T14:03:40 | null | UTF-8 | Python | false | false | 574 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-06 08:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('tax', '0004_partytaxpreference'),
]
operations = [
migrations.AlterField(
model_name='partytaxpreference',
name='party',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='tax_preference', to='ledger.Party'),
),
]
| [
"[email protected]"
]
| |
31fafff04d22006623a7bc672c81339d13885407 | 22ccc673a522b52f2678b6ac96e3ff2a104864ff | /jobs/migrations/0005_auto_20150902_0600.py | e6e2f4f6043f8e5e142666aeaf0c677401c0f62a | []
| no_license | ivlevdenis/pythondigest | 07e448da149d92f37b8ce3bd01b645ace1fa0888 | f8ccc44808a26960fb69a4c4c3491df3e6d3d24e | refs/heads/master | 2021-01-18T02:09:42.121559 | 2016-05-15T22:44:34 | 2016-05-15T22:44:34 | 58,350,368 | 0 | 0 | null | 2016-05-09T05:21:39 | 2016-05-09T05:21:39 | null | UTF-8 | Python | false | false | 1,435 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('jobs', '0004_jobfeed_is_activated'),
]
operations = [
migrations.RemoveField(
model_name='jobitem',
name='salary_currency',
),
migrations.RemoveField(
model_name='jobitem',
name='salary_from',
),
migrations.RemoveField(
model_name='jobitem',
name='salary_till',
),
migrations.RemoveField(
model_name='jobitem',
name='url_api',
),
migrations.RemoveField(
model_name='jobitem',
name='url_logo',
),
migrations.AddField(
model_name='jobitem',
name='description',
field=models.TextField(null=True, blank=True, verbose_name='Описание вакансии'),
),
migrations.AlterField(
model_name='jobitem',
name='employer_name',
field=models.CharField(null=True, max_length=255, blank=True, verbose_name='Работодатель'),
),
migrations.AlterField(
model_name='jobitem',
name='place',
field=models.CharField(null=True, max_length=255, blank=True, verbose_name='Место'),
),
]
| [
"[email protected]"
]
| |
b923127047254c84445608e989311a4fb0eb0b40 | 4f4776eb69cbea9ee1c87a22732c5d778855c83a | /leetcode/Number_Complement.py | 6f4cc7d18ff977b72e60674c3283b67bee1f0ecb | []
| no_license | k4u5h4L/algorithms | 4a0e694109b8aadd0e3b7a66d4c20692ecdef343 | b66f43354792b1a6facff90990a7685f5ed36a68 | refs/heads/main | 2023-08-19T13:13:14.931456 | 2021-10-05T13:01:58 | 2021-10-05T13:01:58 | 383,174,341 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,079 | py | '''
Number Complement
Easy
The complement of an integer is the integer you get when you flip all the 0's to 1's and all the 1's to 0's in its binary representation.
For example, the integer 5 is "101" in binary and its complement is "010", which is the integer 2.
Given an integer num, return its complement.
Example 1:
Input: num = 5
Output: 2
Explanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.
Example 2:
Input: num = 1
Output: 0
Explanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.
'''
class Solution:
def findComplement(self, num: int) -> int:
binary = bin(num)[2:]
b = ""
for bit in binary:
if bit == '1':
b += '0'
else:
b += '1'
dec = 0
for i, char in enumerate(reversed(b)):
if char == '1':
dec += (2 ** i)
return dec
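# A minimal alternative sketch (not part of the original solution): XOR with
# an all-ones mask of the same bit length flips every bit at once, e.g.
#     mask = (1 << num.bit_length()) - 1
#     complement = num ^ mask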
| [
"[email protected]"
]
| |
7bccdd943219008b9ab87f2c0d3a9f60a25927c6 | 9b20743ec6cd28d749a4323dcbadb1a0cffb281b | /03_Linear_Algebra_for_Machine_Learning/04/05_vector_division.py | 7a5cbfcb42a4a5db9dfe2635df84d2ce76f0ddf3 | []
| no_license | jggrimesdc-zz/MachineLearningExercises | 6e1c7e1f95399e69bba95cdfe17c4f8d8c90d178 | ee265f1c6029c91daff172b3e7c1a96177646bc5 | refs/heads/master | 2023-03-07T19:30:26.691659 | 2021-02-19T08:00:49 | 2021-02-19T08:00:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 184 | py | # vector division
from numpy import array
# define first vector
a = array([1, 2, 3])
print(a)
# define second vector
b = array([1, 2, 3])
print(b)
# divide vectors
c = a / b
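# NumPy's / operator divides element-wise, so this prints [1. 1. 1.]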
print(c)
| [
"[email protected]"
]
| |
47010cc8029ce26f087bc5af210729e2ad8964d0 | 6062dc6c23a4013a879617cd9dd8d60fba582964 | /day23/machine.py | 16384f67fcfce888fea5c9cb2095b5fec6a57bfc | []
| no_license | grey-area/advent-of-code-2017 | 8134a1213e69460e24a821ff96e38cbc7f83b480 | 87c213277e4535fff0a1dcf7ad26e182e20b8165 | refs/heads/master | 2020-04-13T05:38:36.852721 | 2018-12-30T23:31:00 | 2018-12-30T23:31:00 | 162,997,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,514 | py | from collections import defaultdict
from collections import namedtuple
Instruction = namedtuple('Instruction', ['op', 'args'])
class Machine():
def __init__(self, filename):
self.registers = defaultdict(int)
self.load_program(filename)
self.ip = 0
self.terminated = False
self.mul_called = 0
def cast(self, X):
try:
return int(X)
except ValueError:
return self.registers[X]
def sub(self, X, Y):
self.registers[X] = self.registers[X] - self.cast(Y)
def mul(self, X, Y):
self.registers[X] = self.registers[X] * self.cast(Y)
self.mul_called += 1
def jnz(self, X, Y):
if self.cast(X) != 0:
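            # the -1 offsets the unconditional self.ip += 1 applied in step()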
self.ip += self.cast(Y) - 1
def set(self, X, Y):
self.registers[X] = self.cast(Y)
def load_program(self, filename):
ops = {}
self.program = []
ops['jnz'] = self.jnz
ops['set'] = self.set
ops['sub'] = self.sub
ops['mul'] = self.mul
with open(filename) as f:
text = f.read().splitlines()
for line in text:
op_str, *args = line.split(' ')
self.program.append(Instruction(ops[op_str], args))
def step(self):
op, args = self.program[self.ip]
op(*args)
self.ip += 1
if self.ip < 0 or self.ip >= len(self.program):
self.terminated = True
| [
"[email protected]"
]
| |
7084db3062d66581c38fbdc43d86b9d20a9172c9 | 4926667354fa1f5c8a93336c4d6e2b9f6630836e | /1534.py | 13cc2b14de3bdc53a7c3d07c0f26668d8b35111d | []
| no_license | nascarsayan/lintcode | 343b3f6e7071479f0299dd1dd1d8068cbd7a7d9e | 4da24b9f5f182964a1bdf4beaa8afc17eb7a70f4 | refs/heads/master | 2021-07-13T12:31:45.883179 | 2020-07-20T02:27:53 | 2020-07-20T02:27:53 | 185,825,565 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 825 | py | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param root: root of a tree
@return: head node of a doubly linked list
"""
def treeToDoublyList(self, root):
# Write your code here.
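        # recurse flattens the subtree in-order and returns (head, tail) of a
        # non-circular doubly linked list; the two ends are joined into a
        # circle once at the bottom of this method.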
def recurse(root):
if root is None:
return (None, None)
st, fl = root, root
if root.left is not None:
lst, lfl = recurse(root.left)
lfl.right = root
root.left = lfl
st = lst
if root.right is not None:
rst, rfl = recurse(root.right)
root.right = rst
rst.left = root
fl = rfl
return (st, fl)
if root is None:
return None
hd, tl = recurse(root)
hd.left = tl
tl.right = hd
return hd
| [
"[email protected]"
]
| |
fd30cbf33b7a94e9cba9d39a95ec3ac9243b5d48 | e89509b453632747077bc57dbec265a7703d5c7c | /function/globalfunc/baiscadd.py | 8c7c28e3fdb91a3022af78a3ba7e14b70d447686 | []
| no_license | Madhav2108/udemy-python-as | a9dcfdbfdc1bb85471aa66de77957e962a7c5486 | 0bc6a501516618fb3c7ab10be6bc16c047aeec3f | refs/heads/master | 2023-03-30T11:25:16.064592 | 2021-03-30T18:10:46 | 2021-03-30T18:10:46 | 286,001,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | a = 15
b = 10
def add():
c = a + b
print(c)
add() | [
"[email protected]"
]
| |
e07f624ea0d255df65ac483eff918d2f319b22b5 | afea9757be324c8def68955a12be11d71ce6ad35 | /willyanealves/services/migrations/0009_auto_20201209_1404.py | b2e2a364f136f17ebd91a275a705f8061d4ef9ea | []
| no_license | bergpb/willyane-alves | c713cac3ec3a68005f3b8145985693d2477ba706 | 8b2b9922ba35bf2043f2345228f03d80dbd01098 | refs/heads/master | 2023-02-10T19:57:50.893172 | 2021-01-11T16:17:14 | 2021-01-11T16:17:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | # Generated by Django 3.1.2 on 2020-12-09 17:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('services', '0008_auto_20201209_1400'),
]
operations = [
migrations.AlterField(
model_name='service',
name='price',
field=models.DecimalField(decimal_places=2, max_digits=6, verbose_name='Valor'),
),
]
| [
"[email protected]"
]
| |
101feda1a0f140f3e9c0891e6c61e0269a85ac2e | dda862418770f3885256d96e9bdb13d0759c5f43 | /codeforces/div-2/nastya-and-rice.py | a2c25afd65589336a3210b2dd8ff1e66d0aefc44 | [
"MIT"
]
| permissive | bellatrixdatacommunity/data-structure-and-algorithms | d56ec485ebe7a5117d4922caeb0cd44c5dddc96f | d24c4001a797c12347973263a0f4f98939e86900 | refs/heads/master | 2022-12-03T00:51:07.944915 | 2020-08-13T20:30:51 | 2020-08-13T20:30:51 | 270,268,375 | 4 | 0 | MIT | 2020-08-13T20:30:53 | 2020-06-07T10:19:36 | Python | UTF-8 | Python | false | false | 3,299 | py | """
[A. Nastya and Rice](https://codeforces.com/contest/1341/problem/A)
time limit per test: 1 second
memory limit per test: 256 megabytes
input: standard input
output: standard output

Nastya just made a huge mistake and dropped a whole package of rice on the floor. Mom will come soon. If she sees
this, then Nastya will be punished.

In total, Nastya dropped n grains. Nastya read that each grain weighs some integer number of grams from a-b to a+b,
inclusive (numbers a and b are known), and the whole package of n grains weighs from c-d to c+d grams, inclusive
(numbers c and d are known). The weight of the package is the sum of the weights of all n grains in it.

Help Nastya understand if this information can be correct. In other words, check whether each grain can have such a
mass that the i-th grain weighs some integer number x_i (a-b <= x_i <= a+b), and in total they weigh from c-d to c+d,
inclusive (c-d <= x_1 + x_2 + ... + x_n <= c+d).

Input
The input consists of multiple test cases. The first line contains a single integer t (1 <= t <= 1000), the number of
test cases.
The next t lines contain descriptions of the test cases; each line contains 5 integers: n (1 <= n <= 1000), the number
of grains that Nastya counted, and a, b, c, d (0 <= b < a <= 1000, 0 <= d < c <= 1000), numbers that determine the
possible weight of one grain of rice (from a-b to a+b) and the possible total weight of the package (from c-d to c+d).

Output
For each test case given in the input print "Yes" if the information about the weights is not inconsistent, and print
"No" if n grains with masses from a-b to a+b cannot make a package with a total mass from c-d to c+d.

Example
Input
5
7 20 3 101 18
11 11 10 234 2
8 9 7 250 122
19 41 21 321 10
3 10 8 6 1
Output
Yes
No
Yes
No
Yes

Note
In the first test case of the example, we can assume that each grain weighs 17 grams and the pack 119 grams; then
Nastya really could have collected the whole pack.
In the third test case of the example, we can assume that each grain weighs 16 grams and the pack 128 grams; then
Nastya really could have collected the whole pack.
In the fifth test case of the example, we can assume that the 3 grains of rice weigh 2, 2, and 3 grams, and the pack
7 grams; then Nastya really could have collected the whole pack.
In the second and fourth test cases of the example, we can prove that it is impossible to choose weights for the
grains and the pack so that the weight of the pack equals the total weight of all collected grains.
"""
import sys
if __name__ == "__main__":
input = sys.stdin.read()
data = list(map(int, input.split()))
T = int(data[0])
it = 1
while T > 0:
n = data[it]
a = data[it + 1]
b = data[it + 2]
c = data[it + 3]
d = data[it + 4]
mini = c - d
maxi = c + d
min_rice = mini / n if n != 0 else 0
max_rice = maxi / n if n != 0 else 0
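        # feasible iff the package interval [c-d, c+d] overlaps the grain-sum
        # interval [n*(a-b), n*(a+b)]; dividing through by n gives the same
        # check on per-grain averages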
if max_rice < (a - b) or min_rice > (a + b):
print("No")
else:
print("Yes")
it += 5
T -= 1
| [
"[email protected]"
]
| |
edfa698a2b59a1f3f4933f667ae163d842cb428d | f06ddca5258290a1e7448a18e1d24a9d20226fbd | /pytext/common/constants.py | 3b0c31b01f7bc57811441b3f5a267b920e948602 | [
"BSD-3-Clause"
]
| permissive | mruberry/pytext | 6d64bc37429e3dd5581e5b3b6bf60bd216b6f445 | 3bba58a048c87d7c93a41830fa7853896c4b3e66 | refs/heads/master | 2022-07-16T07:41:47.781126 | 2020-05-14T04:52:35 | 2020-05-14T04:54:33 | 263,892,770 | 2 | 0 | NOASSERTION | 2020-05-14T11:11:33 | 2020-05-14T11:11:32 | null | UTF-8 | Python | false | false | 2,357 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from enum import Enum
class DatasetFieldName:
DOC_LABEL_FIELD = "doc_label"
WORD_LABEL_FIELD = "word_label"
UTTERANCE_FIELD = "utterance"
TEXT_FIELD = "word_feat"
SEQ_FIELD = "seq_word_feat"
DICT_FIELD = "dict_feat"
RAW_DICT_FIELD = "sparsefeat"
CHAR_FIELD = "char_feat"
DENSE_FIELD = "dense_feat"
CONTEXTUAL_TOKEN_EMBEDDING = "contextual_token_embedding"
DOC_WEIGHT_FIELD = "doc_weight"
WORD_WEIGHT_FIELD = "word_weight"
RAW_WORD_LABEL = "raw_word_label"
TOKEN_INDICES = "token_indices"
TOKEN_RANGE = "token_range"
TOKENS = "tokens"
LANGUAGE_ID_FIELD = "lang"
SEQ_LENS = "seq_lens"
TARGET_SEQ_LENS = "target_seq_lens"
RAW_SEQUENCE = "raw_sequence"
SOURCE_SEQ_FIELD = "source_sequence"
TARGET_SEQ_FIELD = "target_sequence"
NUM_TOKENS = "num_tokens"
class PackageFileName:
SERIALIZED_EMBED = "pretrained_embed_pt_serialized"
RAW_EMBED = "pretrained_embed_raw"
class DFColumn:
DOC_LABEL = "doc_label"
WORD_LABEL = "word_label"
UTTERANCE = "text"
ALIGNMENT = "alignment"
DICT_FEAT = "dict_feat"
DENSE_FEAT = "dense_feat"
RAW_FEATS = "raw_feats"
MODEL_FEATS = "model_feats"
DOC_WEIGHT = "doc_weight"
WORD_WEIGHT = "word_weight"
TOKEN_RANGE = "token_range"
LANGUAGE_ID = "lang"
SOURCE_SEQUENCE = "source_sequence"
CONTEXT_SEQUENCE = "context_sequence"
TARGET_SEQUENCE = "target_sequence"
SOURCE_FEATS = "source_feats"
TARGET_TOKENS = "target_tokens"
SEQLOGICAL = "seqlogical"
TARGET_PROBS = "target_probs"
TARGET_LOGITS = "target_logits"
TARGET_LABELS = "target_labels"
class Padding:
WORD_LABEL_PAD = "PAD_LABEL"
WORD_LABEL_PAD_IDX = 0
DEFAULT_LABEL_PAD_IDX = -1
class VocabMeta:
UNK_TOKEN = "<unk>"
UNK_NUM_TOKEN = f"{UNK_TOKEN}-NUM"
PAD_TOKEN = "<pad>"
EOS_TOKEN = "</s>"
INIT_TOKEN = "<s>"
PAD_SEQ = "<pad_seq>"
EOS_SEQ = "</s_seq>"
INIT_SEQ = "<s_seq>"
class BatchContext:
IGNORE_LOSS = "ignore_loss"
INDEX = "row_index"
TASK_NAME = "task_name"
class Stage(Enum):
TRAIN = "Training"
EVAL = "Evaluation"
TEST = "Test"
OTHERS = "Others"
class RawExampleFieldName:
ROW_INDEX = "row_index"
| [
"[email protected]"
]
| |
4fcdf50c43cf0c0a802c7899882d88c66afb5521 | e70b678712a355a0b51632728c7781b0bdcf29f4 | /Algorithms/Python/Best-Time-to-Buy-and-Sell-Stock.py | aa4097ebb2db64fb2c8d11bb08368e8d97f353a7 | []
| no_license | keyi/Leetcode_Solutions | b3e3c6835ed335d7d4ad53a1b37e59ac15fcf3af | 69e4e969b435ff2796bd7c4b5dad9284a853ab54 | refs/heads/master | 2020-05-21T23:36:20.450053 | 2018-11-11T03:45:28 | 2018-11-11T03:45:28 | 33,714,612 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | class Solution(object):
def maxProfit(self, prices):
"""
:type prices: List[int]
:rtype: int
"""
if len(prices) < 2:
return 0
        # One pass: track the cheapest price seen so far and the best
        # profit achievable by selling at the current price.
        ans, minNum = 0, prices[0]
for i in range(1, len(prices)):
if prices[i] > minNum:
ans = max(prices[i] - minNum, ans)
else:
minNum = prices[i]
return ans
| [
"[email protected]"
]
| |
4c4e498f8f69a2285e2c364574d94132fee73875 | 4dd695521343d56ff943e8c1768343d7680714e3 | /experiments/scripts_auto_closedset_ynoguti/config_iVector_200_fold6.py | e61d28d540206408e386efddf28c2f7122093a8b | []
| no_license | natharb/environment | ea659ee541f6473e92b5b30c549e52b66f47b280 | 86e6cee6e01d2370abeb7c55a2c8a15001735919 | refs/heads/master | 2021-09-28T02:39:02.222966 | 2018-11-13T12:03:34 | 2018-11-13T12:03:34 | 139,762,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,314 | py | #!/usr/bin/env python
# vim: set fileencoding=utf-8 :
#Nathália Alves Rocha Batista ([email protected])
import sys
sys.path.insert(0, '.')
import bob.bio.spear
import bob.bio.gmm
import numpy
import scipy.spatial
temp_directory = './results/closedset_ynoguti/iVector/200/fold_6/temp/'
result_directory = './results/closedset_ynoguti/iVector/200/fold_6/results/'
sub_directory = 'subdirectory'
database = 'database_iVector_200_fold6.py'
groups = ['dev']
#groups = ['dev', 'eval']
preprocessor = bob.bio.spear.preprocessor.Energy_2Gauss(max_iterations = 10, convergence_threshold = 0.0005, variance_threshold = 0.0005, win_length_ms = 20., win_shift_ms = 10., smoothing_window = 10)
extractor = bob.bio.spear.extractor.Cepstral(win_length_ms = 25, win_shift_ms = 10, n_filters = 24 , dct_norm = False, f_min = 0, f_max = 4000, delta_win = 2, mel_scale = True, with_energy = True, with_delta = True, with_delta_delta = True, n_ceps = 19, pre_emphasis_coef = 0.97)
algorithm = bob.bio.gmm.algorithm.IVector(subspace_dimension_of_t = 200, tv_training_iterations = 10, update_sigma = True, use_whitening = True, use_lda = False, use_wccn = False, use_plda = False, lda_dim = 50, plda_dim_F = 50, plda_dim_G = 50, plda_training_iterations = 50, number_of_gaussians = 256)
parallel = 40
verbose = 2 | [
"[email protected]"
]
| |
cdbd67b1a12b3c7320da2aafaa87a06508e9b4de | 5ef19fdf04970ed0481ff29234a11b812b55a257 | /OS/SRT.py | 89a0d94dba43339334289d424480551cfb9d8b02 | []
| no_license | priyamshah112/Study | 636bfadee2384b39399b8b2c03349c9faf8853df | 2ea4341d8497573b014a5275d548289696fe3662 | refs/heads/master | 2021-06-28T15:19:24.002518 | 2020-10-02T07:36:24 | 2020-10-02T07:36:24 | 149,157,682 | 0 | 0 | null | 2020-10-02T07:36:25 | 2018-09-17T16:48:41 | Java | UTF-8 | Python | false | false | 1,762 | py | class Process:
def __init__(self, p_no, at, bt,wt,tat,nt,ct,rt):
self.p_no = p_no
self.at = at
self.bt = bt
self.wt =wt
self.tat =tat
self.nt =nt
self.ct=ct
self.rt=rt
def Shift(alist):
    # Reorder the ready queue by remaining time (the heart of SRT/SRTF).
    alist.sort(key=lambda x: x.rt)
    return alist
def main():
n=int(input("Enter number of processes : "))
q=1
pt = []
chart = []
queue=[]
time=0
ap=0 #arrived processes
rp=0 #ready processes
done=0
start=0
avgwt=0
avgtat=0
avgnt=0
for i in range(0,n):
pt.insert(i,Process(i,int(input("Enter Arrival Time : ")),int(input("Enter Burst Time :")),0.0,0.0,0.0,0,0))
pt[i].rt=pt[i].bt
while(done<n):
        # Admit every process that has arrived by the current time
        # (assumes processes are entered in non-decreasing arrival order).
        for i in range(ap, n):
            if time >= pt[i].at:
                queue.append(pt[i])
                ap += 1
                rp += 1
        if rp < 1:
            # Nothing is ready: record an idle tick (-1), not a real PID.
            chart.append(-1)
            time += 1
            continue
if start:
queue = Shift(queue)
if queue[0].rt > 0:
for g in range(time, time+q):
chart.append(queue[0].p_no)
time+=q
queue[0].rt-=q
else:
pt[queue[0].p_no].ct=time
queue.pop(0)
done+=1
rp-=1
start=1
print(chart)
for i in range(0,n):
pt[i].tat = pt[i].ct-pt[i].at
avgtat+=pt[i].tat
pt[i].wt = pt[i].tat - pt[i].bt
avgwt+=pt[i].wt
pt[i].nt = pt[i].tat / pt[i].bt
avgnt+=pt[i].nt
print("Process no.\t AT\t BT\t WT\t TAT\t NT\t CT\t")
for i in range(0,n):
print(str(pt[i].p_no)+" \t\t "+str(pt[i].at)+" \t "+str(pt[i].bt)+" \t "+str(round(pt[i].wt,2))+" \t "+str(round(pt[i].tat,2))+" \t "+str(round(pt[i].nt,2))+" \t "+str(pt[i].ct))
print("Average Waiting time",avgwt/n)
print("Average TAT",avgtat/n)
print("Average Normalized Time",avgnt/n)
main()
| [
"[email protected]"
]
| |
87a9d5fc07b1eeb59551a66e38e121c1bcb52f4b | bb0eeade4685dc89ff8a53beb813afdf7394989d | /ML2018/commend sys/readers.py | 7d306676d9c3cffdfe249ecd0402e19a6f313dbb | []
| no_license | zhaocheng1996/pyproject | 72929cd0ba2f0486d7dc87a7defa82656bf75a8e | 0a1973dda314f844f9898357bc4a5c8ee3f2246d | refs/heads/master | 2021-10-26T08:38:43.675739 | 2019-04-11T13:52:46 | 2019-04-11T13:52:46 | 176,939,063 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,145 | py | from __future__ import absolute_import, division, print_function
import numpy as np
import pandas as pd
def read_file(filname, sep="\t"):
    col_names = ["user", "item", "rate", "st"]  # "st" is the rating timestamp
df = pd.read_csv(filname, sep=sep, header=None, names=col_names, engine='python')
df["user"] -= 1
df["item"] -= 1
for col in ("user", "item"):
df[col] = df[col].astype(np.int32)
df["rate"] = df["rate"].astype(np.float32)
#print(len(df))
return df
#print(df)
# user item rate st
# 0 0 1192 5.0 978300760
# 1 0 660 3.0 978302109
# 2 0 913 3.0 978301968
class ShuffleIterator(object):
"""
Randomly generate batches
"""
def __init__(self, inputs, batch_size=10):
self.inputs = inputs
self.batch_size = batch_size
self.num_cols = len(self.inputs)
self.len = len(self.inputs[0])
self.inputs = np.transpose(np.vstack([np.array(self.inputs[i]) for i in range(self.num_cols)]))
def __len__(self):
return self.len
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
ids = np.random.randint(0, self.len, (self.batch_size,))
out = self.inputs[ids, :]
return [out[:, i] for i in range(self.num_cols)]
class OneEpochIterator(ShuffleIterator):
"""
Sequentially generate one-epoch batches, typically for test data
"""
def __init__(self, inputs, batch_size=10):
super(OneEpochIterator, self).__init__(inputs, batch_size=batch_size)
if batch_size > 0:
self.idx_group = np.array_split(np.arange(self.len), np.ceil(self.len / batch_size))
else:
self.idx_group = [np.arange(self.len)]
self.group_id = 0
def next(self):
if self.group_id >= len(self.idx_group):
self.group_id = 0
raise StopIteration
out = self.inputs[self.idx_group[self.group_id], :]
self.group_id += 1
return [out[:, i] for i in range(self.num_cols)]
if __name__ == "__main__":
    # Guard the demo read so importing this module has no side effects.
    df = read_file('./ml-1m/ratings.dat', sep="::")
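    # Minimal batching sketch (batch size is arbitrary; column names follow
    # read_file above): draw one random batch of (user, item, rate) arrays.
    train_iter = ShuffleIterator([df["user"], df["item"], df["rate"]],
                                 batch_size=4)
    users, items, rates = next(train_iter)
    print(users, items, rates)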
| [
"[email protected]"
]
| |
79dcf66b9517d6c9857138b38aa4bebd074af7e9 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_noisy2781.py | 63f0d647daa70d02a644d9fe38bd1a0e985c5100 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,238 | py | # qubit number=4
# total number=40
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
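# e.g. bitwise_dot("101", "111") == "0": 1*1 + 0*1 + 1*1 = 2, and 2 % 2 == 0.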
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=13
prog.cx(input_qubit[0],input_qubit[3]) # number=17
prog.x(input_qubit[3]) # number=18
prog.cx(input_qubit[0],input_qubit[3]) # number=19
prog.cx(input_qubit[0],input_qubit[3]) # number=15
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[1]) # number=31
prog.cz(input_qubit[2],input_qubit[1]) # number=32
prog.h(input_qubit[1]) # number=33
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[3]) # number=12
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[0]) # number=24
prog.cz(input_qubit[3],input_qubit[0]) # number=25
prog.h(input_qubit[0]) # number=26
prog.h(input_qubit[0]) # number=37
prog.cz(input_qubit[3],input_qubit[0]) # number=38
prog.h(input_qubit[0]) # number=39
prog.z(input_qubit[3]) # number=29
prog.cx(input_qubit[3],input_qubit[0]) # number=30
prog.x(input_qubit[2]) # number=23
prog.cx(input_qubit[3],input_qubit[0]) # number=22
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.y(input_qubit[2]) # number=10
prog.y(input_qubit[2]) # number=11
prog.x(input_qubit[3]) # number=36
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.cx(input_qubit[3],input_qubit[0]) # number=35
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = FakeVigo()
sample_shot =8000
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_noisy2781.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
]
| |
5fb88b6a250fc8138e50b016b49f98c4fc0590f7 | 41c5f7da28b87a3034754254d21791b322e819d8 | /madana_apiclient/models/xml_ns0_process.py | 8bfbc39cbd2a85998fdbcc992333a3857479e0a0 | []
| no_license | MADANA-IO/madana-apiclient-python | 16cb3eb807897903df2a885a94a2c02fc405818a | 40dc21ab43d9565ac3dff86d7270093cce112753 | refs/heads/master | 2023-03-08T05:02:32.616469 | 2021-02-11T10:17:30 | 2021-02-11T10:17:30 | 287,797,297 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,648 | py | # coding: utf-8
"""
madana-api
<h1>Using the madana-api</h1> <p>This documentation contains a Quickstart Guide, relating client functionality and information about the available endpoints and used datamodels. </p> <p> The madana-api and its implementations are still in heavy development. This means that there may be problems in our protocols, or there may be mistakes in our implementations. We take security vulnerabilities very seriously. If you discover a security issue, please bring it to our attention right away! If you find a vulnerability that may affect live deployments -- for example, by exposing a remote execution exploit -- please send your report privately to [email protected]. Please DO NOT file a public issue. If the issue is a protocol weakness that cannot be immediately exploited or something not yet deployed, just discuss it openly </p> <br> <p> Note: Not all functionality might be acessible without having accquired and api-license token. For more information visit <a href=\"https://www.madana.io\">www.madana.io</a> </p> <br> # noqa: E501
The version of the OpenAPI document: 0.4.16-master.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from madana_apiclient.configuration import Configuration
class XmlNs0Process(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
}
attribute_map = {
}
def __init__(self, local_vars_configuration=None): # noqa: E501
"""XmlNs0Process - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, XmlNs0Process):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, XmlNs0Process):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
]
| |
a8edb0da7d8720a5f48f1695b3f768a2a34ec969 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /010_strings/_exercises/Python 3 Most Nessesary/6.10. Search and replace in line.py | c8f366f01071b74a68d0f19128b40eb84fc3a1d0 | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 2,873 | py | # # -*- coding: utf-8 -*-
#
# s = "пример пример Пример"
# print ?.f.. "при" , ?.f.. "При" , ?.f.. "тест"
# # (0, 14, -1)
# print ?.f.. "при", 9 , ?.f.. "при", 0, 6 , ?.f.. "при", 7, 12
# # (-1, 0, 7)
#
#
# s = "пример пример Пример"
# print ?.i..("при" , ?.i..("при", 7, 12 , ?.i..("При", 1
# # (0, 7, 14)
# # print(s.index("тест"))
# # Traceback (most recent call last):
# # File "<pyshell#24>", line 1, in <module>
# # s.index("тест")
# # ValueError: substring not found
#
#
# s = "пример пример Пример Пример"
# print ?.rf.. "при" , ?.rf.. "При" , ?.rf.. "тест"
# # (7, 21, -1)
# print ?.f.. "при", 0, 6 , ?.f.. "При", 10, 20
# # (0, 14)
#
#
# s = "пример пример Пример Пример"
# print ?.ri.. "при" , ?.ri.. "При" , ?.ri.. "при", 0, 6
# # (7, 21, 0)
# # print(s.rindex("тест"))
# # Traceback (most recent call last):
# # File "<pyshell#30>", line 1, in <module>
# # s.rindex("тест")
# # ValueError: substring not found
#
#
# s = "пример пример Пример Пример"
# print ?.c.. "при" , ?.c.. "при", 6 , ?.c.. "При"
# # (2, 1, 2)
# print ?.c.. "тест"
# 0
#
#
# s = "пример пример Пример Пример"
# print ?.st..w.. "при" , ?.st..w.. "При"
# # (True, False)
# print ?.st..w.. "при", 6 , ?.st..w.. "При", 14
# # (False, True)
#
#
# s = "пример пример Пример Пример"
# print ?.st..w.. "при", "При"
# # True
#
#
# s = "подстрока ПОДСТРОКА"
# print ?.e..w.. "ока" , ?.e..w.. "ОКА"
# # (False, True)
# print ?.e..w.. "ока", 0, 9
# # True
#
#
# s = "подстрока ПОДСТРОКА"
# print ?.e..w.. "ока", "ОКА"
# # True
#
# s = "Привет, Петя"
# print ?.re.. "Петя", "Вася"
# # Привет, Вася
# print ?.re.. "петя", "вася" # Case-sensitive: lowercase "петя" does not match
# # Привет, Петя
# s = "strstrstrstrstr"
# print ?.re.. "str", "" , ?.re.. "str", "", 3
# # ('', 'strstr')
#
#
# s = "Пример"
# d = o.. "П" N.. o.. "р" o.. "Р"
# print ?
# # {1088: 1056, 1055: None}
# print ?.tr.. d
# # 'РимеР'
#
#
# t = st_.m.tr.. "а" "А", "о" "О", "с" N..
# print(t
# # {1072: 'А', 1089: None, 1086: 'О'}
# print "строка".tr.. t
# # 'трОкА'
#
#
# t = st_.m.tr.. "абвгдежзи", "АБВГДЕЖЗИ"
# print(t)
# # {1072: 1040, 1073: 1041, 1074: 1042, 1075: 1043, 1076: 1044,
# # 1077: 1045, 1078: 1046, 1079: 1047, 1080: 1048}
# print "абвгдежзи".tr.. t
# # 'АБВГДЕЖЗИ'
#
#
# t = st_.m.tr.. "123456789", "0" * 9, "str"
# print(t)
# # {116: None, 115: None, 114: None, 49: 48, 50: 48, 51: 48,
# # 52: 48, 53: 48, 54: 48, 55: 48, 56: 48, 57: 48}
# print "str123456789str".tr.. t
# # '000000000' | [
"[email protected]"
]
| |
258bb0e2762aefc4fda2a6a064e89faad4e34e96 | ff81a9d7880f1b85a1dc19d5eba5ac72d7179c86 | /pychron/hardware/polyinomial_mapper.py | aa3f53521645648ca77a9b4089bb88812a44f0bd | [
"Apache-2.0"
]
| permissive | UManPychron/pychron | 2fb7e479a9f492423c0f458c70102c499e1062c4 | b84c9fd70072f9cbda30abe2c471e64fe3dd75d8 | refs/heads/develop | 2022-12-03T23:32:45.579326 | 2020-01-29T19:02:20 | 2020-01-29T19:02:20 | 36,100,637 | 0 | 0 | null | 2015-05-23T00:10:06 | 2015-05-23T00:10:05 | null | UTF-8 | Python | false | false | 2,147 | py | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from numpy import poly1d
from scipy import optimize
from traits.api import HasTraits, List, Float
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.core.helpers.strtools import csv_to_floats
class PolynomialMapper(HasTraits):
"""
list of coefficients. see numpy.poly1d to see exactly how coefficients used
coefficient = 1,2,3
==> 1*x^2+2*x+3
"""
_coefficients = List
output_low = Float(0)
output_high = Float(100)
_polynomial = None
def set_coefficients(self, cs):
self._coefficients = cs
self._polynomial = poly1d(cs)
def parse_coefficient_string(self, s):
self.set_coefficients(csv_to_floats(s))
def map_measured(self, v):
"""
convert a measured value to an output value (Voltage -> Temp)
"""
if self._polynomial:
v = self._polynomial(v)
return v
def map_output(self, v):
"""
convert an output value to measured value (Voltage <- Temp)
"""
        # Invert the polynomial: subtract v from the constant term, then
        # root-find on the bracketing interval [output_low, output_high].
        c = self._coefficients[:]
        c[-1] -= v
        return optimize.brentq(poly1d(c), self.output_low, self.output_high)
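
# A minimal usage sketch (coefficients here are hypothetical; real ones come
# from the device configuration):
#   mapper = PolynomialMapper(output_low=0, output_high=1500)
#   mapper.parse_coefficient_string('700, 25')  # temp = 700*V + 25
#   temp = mapper.map_measured(1.2)             # Voltage -> Temp (= 865.0)
#   volts = mapper.map_output(865.0)            # Temp -> Voltage (~= 1.2)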
# ============= EOF =============================================
| [
"[email protected]"
]
| |
04398fb29841e18b9505fe74de19ad29fe08b860 | 7a527060afabd2e0867d5dcf4b75592b43ef5005 | /Leetcode/二叉树/103. 二叉树的锯齿形层次遍历.py | d361d16ca05d5ccb931c8c609b61586d0b68b318 | []
| no_license | Stevenzzz1996/MLLCV | ff01a276cf40142c1b28612cb5b43e563ad3a24a | 314953b759212db5ad07dcb18854bf6d120ba172 | refs/heads/master | 2023-02-10T18:11:30.399042 | 2021-01-05T12:05:21 | 2021-01-05T12:05:21 | 267,804,954 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 903 | py | #!usr/bin/env python
# -*- coding:utf-8 -*-
# author: sfhong2020 time:2020/5/7 15:01
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
from typing import List


class Solution:
def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:
if not root: return []
res = []
cur = [root]
depth = 0
while cur:
tmp = []
next_level = []
for node in cur:
tmp.append(node.val)
if node.left:
next_level.append(node.left)
if node.right:
next_level.append(node.right)
if depth % 2 == 1:
res.append(tmp[::-1])
else:
res.append(tmp)
depth += 1
cur = next_level
return res | [
"[email protected]"
]
| |
d6373eff10443fed41f31fbc6f731d44a1c41826 | 02b3e28fa0b4f6ece144a9455c32194e63f4bf17 | /vickitrix/__init__.py | a5c33435030889c262f303092f8264c59058f6b6 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
]
| permissive | gitter-badger/vickitrix | 7b1be0813bc21503b995d75f57819397b2fcb84e | b33a6593837c5302dcc95867e982f1713d234bc9 | refs/heads/master | 2021-01-15T17:15:14.147257 | 2017-08-08T14:09:41 | 2017-08-08T14:09:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,542 | py | #!/usr/bin/env python
"""
vickitrix
Checks tweets using http://www.tweepy.org/ and
uses rules specified in file to make market trades on GDAX using
https://github.com/danpaquin/GDAX-Python. Default rules are stored in
rules/vicki.py and follow the tweets of @vickicryptobot.
"""
from __future__ import print_function
import sys
# For 2-3 compatibility
try:
input = raw_input
except NameError:
pass
_help_intro = """vickitrix allows users to base GDAX trades on tweets."""
_key_derivation_iterations = 5000
try:
import gdax
except ImportError as e:
e.message = (
'vickitrix requires GDAX-Python. Install it with "pip install gdax".'
)
raise
try:
from twython import TwythonStreamer, Twython, TwythonError
except ImportError as e:
e.message = (
'vickitrix requires Twython. Install it with '
'"pip install twython".'
)
raise
try:
from Crypto.Cipher import AES
from Crypto.Protocol import KDF
from Crypto import Random
except ImportError as e:
e.message = (
'vickitrix requires PyCrypto. Install it with '
'"pip install pycrypto".'
)
raise
import os
import errno
import time
import argparse
import getpass
import datetime
import base64
import json
import unicodedata
# In case user wants to use regular expressions on conditions/funds
import re
def help_formatter(prog):
""" So formatter_class's max_help_position can be changed. """
return argparse.HelpFormatter(prog, max_help_position=40)
def print_to_screen(message, newline=True, carriage_return=False):
""" Prints message to stdout as well as stderr if stderr is redirected.
message: message to print
newline: True iff newline should be printed
carriage_return: True iff carriage return should be printed; also
clears line with ANSI escape code
No return value.
"""
full_message = ('\x1b[K' + message + ('\r' if carriage_return else '')
+ (os.linesep if newline else ''))
try:
sys.stderr.write(full_message)
if sys.stderr.isatty():
sys.stderr.flush()
else:
try:
# So the user sees it too
sys.stdout.write(full_message)
sys.stdout.flush()
except UnicodeEncodeError:
sys.stdout.write(
unicodedata.normalize(
'NFKD', full_message
).encode('ascii', 'ignore')
)
sys.stdout.flush()
except UnicodeEncodeError:
sys.stderr.write(
unicodedata.normalize(
'NFKD', full_message
).encode('ascii', 'ignore')
)
sys.stderr.flush()
def timestamp():
""" Returns timestamp string. """
return time.strftime('%A, %b %d, %Y at %I:%M:%S %p %Z || ',
time.localtime(time.time()))
def prettify_dict(rule):
""" Prettifies printout of dictionary as string.
rule: rule
Return value: rule string
"""
return json.dumps(rule, sort_keys=False,
indent=4, separators=(',', ': '))
def get_dough(gdax_client, status_update=False):
""" Retrieve dough in user accounts
gdax_client: instance of gdax.AuthenticatedClient
status_update: True iff status update should be printed
Return value: dictionary mapping currency to account information
"""
dough = {}
for account in gdax_client.get_accounts():
dough[account['currency']] = account['available']
if status_update:
print_to_screen(''.join([timestamp(), 'Available to trade: ',
', '.join(map(' '.join,
[el[::-1] for el in dough.items()]))]))
return dough
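# get_dough returns e.g. {'USD': '12.34', 'BTC': '0.00100000'} -- currency
# mapped to available balance, as the string values GDAX's accounts call
# reports (example values are hypothetical).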
class TradeListener(TwythonStreamer):
""" Trades on GDAX based on tweets. """
def __init__(self, rules, gdax_client,
app_key, app_secret, oauth_token, oauth_token_secret,
timeout=300, retry_count=None, retry_in=10, client_args=None,
handlers=None, chunk_size=1, sleep_time=0.5):
        super(TradeListener, self).__init__(
            app_key, app_secret, oauth_token, oauth_token_secret,
            timeout=timeout, retry_count=retry_count, retry_in=retry_in,
            client_args=client_args, handlers=handlers, chunk_size=chunk_size
        )
self.rules = rules
self.gdax_client = gdax_client
self.sleep_time = sleep_time
self.available = get_dough(self.gdax_client, status_update=False)
self.public_client = gdax.PublicClient() # for product order book
def on_success(self, status):
for rule in self.rules:
if ((not rule['handles'])
or status['user']['screen_name'].lower()
in rule['handles']) and ((not rule['keywords'])
or any([keyword in status['text'].lower()
for keyword in rule['keywords']])) and eval(
rule['condition'].format(
tweet='status["text"]',
available=self.available
)):
if (('retweeted_status' in status
and status['retweeted_status'])
or status['in_reply_to_status_id']
or status['in_reply_to_status_id_str']
or status['in_reply_to_user_id']
or status['in_reply_to_user_id_str']
or status['in_reply_to_screen_name']):
# This is an RT or reply; don't do anything
return
# Condition satisfied! Perform action
print_to_screen(
''.join(
[timestamp(), 'TWEET MATCHED || @',
status['user']['screen_name'] , ': ',
status['text']]
)
)
for order in rule['orders']:
self.available = get_dough(self.gdax_client,
status_update=True)
order_book = self.public_client.get_product_order_book(
order['product_id']
)
inside_bid, inside_ask = (
order_book['bids'][0][0],
order_book['asks'][0][0]
)
not_enough = False
for money in ['size', 'funds', 'price']:
try:
'''If the hundredths rounds down to zero,
ain't enough'''
order[money] = str(eval(
                                order[money].format(
                                    tweet='status["text"]',
                                    available=self.available,
                                    inside_bid=inside_bid,
                                    inside_ask=inside_ask
                                )
))
                            # Flag if any provided money field rounds to zero.
                            not_enough = not_enough or (
                                int(float(order[money]) * 100) == 0
                            )
except KeyError:
pass
print_to_screen(''.join(
[timestamp(), 'PLACING ORDER', os.linesep] +
[prettify_dict(order)]
))
if not_enough:
print_to_screen(
timestamp() +
'One of {"price", "funds", "size"} is zero! ' +
'Order not placed.'
)
return
if order['side'] == 'buy':
self.gdax_client.buy(**order)
else:
assert order['side'] == 'sell'
self.gdax_client.sell(**order)
print_to_screen(timestamp() + 'Order placed.')
time.sleep(self.sleep_time)
get_dough(self.gdax_client, status_update=True)
def on_error(self, status_code, status):
if status_code == 420:
# Rate limit error; bail and wait to reconnect
self.disconnect()
def go():
""" Entry point """
# Print file's docstring if -h is invoked
parser = argparse.ArgumentParser(description=_help_intro,
formatter_class=help_formatter)
subparsers = parser.add_subparsers(help=(
'subcommands; add "-h" or "--help" '
'after a subcommand for its parameters'),
dest='subparser_name'
)
config_parser = subparsers.add_parser(
'configure',
help=(
'creates profile for storing keys/secrets; '
'all keys are stored in "{}".'.format(
os.path.join(
os.path.expanduser('~'),
'.vickitrix',
'config')
)
)
)
trade_parser = subparsers.add_parser(
'trade',
help='trades based on tweets'
)
# Add command-line arguments
trade_parser.add_argument('--profile', '-p', type=str, required=False,
default='default',
help='which profile to use for trading'
)
trade_parser.add_argument('--rules', '-r', type=str, required=False,
default=os.path.join(os.path.dirname(os.path.realpath(__file__)),
'rules', 'vicki.py'),
help=('rules file; this is Python that sets the variable "rules" '
'to a list of dictionaries')
)
trade_parser.add_argument('--interval', '-i', type=float, required=False,
default=905,
help=('how long to wait (in s) before reattempting to connect '
'after getting rate-limited')
)
trade_parser.add_argument('--sleep', '-s', type=float, required=False,
default=0.5,
help='how long to wait (in s) after an order has been placed'
)
args = parser.parse_args()
key_dir = os.path.join(os.path.expanduser('~'), '.vickitrix')
if args.subparser_name == 'configure':
try:
os.makedirs(key_dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# Grab and write all necessary credentials
config_file = os.path.join(key_dir, 'config')
print('Enter a name for a new profile (default): ', end='')
profile_name = input()
if not profile_name: profile_name = 'default'
salt = Random.new().read(AES.block_size)
key = KDF.PBKDF2(getpass.getpass((
'Enter a password for this profile. The password will be used '
'to generate a key so all GDAX/Twitter passcodes/secrets '
'written to {} are further encoded with AES256. '
'You will have to enter a profile\'s password every time you '
'run "vickitrix trade": '
).format(config_file)), salt,
dkLen=32, count=_key_derivation_iterations)
previous_lines_to_write = []
if os.path.exists(config_file):
'''Have to check if the profile exists already. If it does, replace
it. Assume the config file is under vickitrix's control and thus
has no errors; if the user chooses to mess it up, that's on
them.'''
with open(config_file, 'rU') as config_stream:
line = config_stream.readline().rstrip('\n')
while line:
if line[0] == '[' and line[-1] == ']':
if profile_name == line[1:-1]:
# Skip this profile
for _ in range(8): config_stream.readline()
line = config_stream.readline().rstrip('\n')
continue
previous_lines_to_write.append(line)
for _ in range(8):
previous_lines_to_write.append(
config_stream.readline().rstrip('\n')
)
line = config_stream.readline().rstrip('\n')
with open(config_file, 'w') as config_stream:
print(''.join(['[', profile_name, ']']), file=config_stream)
# Now change permissions
try:
os.chmod(config_file, 0o600)
except OSError as e:
if e.errno == errno.EPERM:
                    print(('Warning: could not change permissions of '
                           '"{}" so it\'s readable/writable by only the '
                           'current user. If there are other users of this '
                           'system, they may be able to read your credentials '
                           'file.').format(config_file), file=sys.stderr)
raise
with open(config_file, 'a') as config_stream:
print(''.join(['Salt: ', base64.b64encode(salt).decode()]),
file=config_stream)
for token in ['GDAX key', 'GDAX secret', 'GDAX passphrase',
'Twitter consumer key', 'Twitter consumer secret',
'Twitter access token key',
'Twitter access token secret']:
if 'key' in token:
print(''.join(['Enter ', token, ': ']), end='')
'''Write it in plaintext if it's a public key; then the
user can open the config file and know which keys are in
use.'''
print(''.join([token, ': ', input()]),
file=config_stream)
else:
# A warning to developers in a variable name
unencoded_and_not_to_be_written_to_disk = getpass.getpass(
''.join(['Enter ', token, ': '])
)
iv = Random.new().read(AES.block_size)
cipher = AES.new(key, AES.MODE_CFB, iv)
print(''.join([
token,
' (AES256-encrypted using profile password): ',
base64.b64encode(iv + cipher.encrypt(
unencoded_and_not_to_be_written_to_disk
)).decode()]), file=config_stream)
for line in previous_lines_to_write:
print(line, file=config_stream)
print(('Configured profile "{}". Encrypted credentials have been '
'stored in "{}". '
'Now use the "trade" subcommand to '
'trigger trades with new tweets.').format(
profile_name,
config_file
))
elif args.subparser_name == 'trade':
# Set and check rules
from imp import load_source
try:
rules = load_source('rules', args.rules).rules
except IOError as e:
e.message = 'Cannot find or access rules file "{}".'.format(
args.rules
)
raise
import copy
# Add missing keys so listener doesn't fail
new_rules = copy.copy(rules)
order_vocab = set(['client_oid', 'type', 'side', 'product_id', 'stp',
'price', 'size', 'time_in_force', 'cancel_after',
'post_only', 'funds', 'overdraft_enabled',
'funding_amount'])
for i, rule in enumerate(rules):
# Check 'condition'
try:
eval(rule['condition'].format(
tweet='"The rain in Spain stays mainly in the plain."',
available={
'ETH' : .01,
'USD' : .01,
'LTC' : .01,
'BTC' : .01
}
))
except KeyError:
# 'condition' isn't required, so make default True
new_rules[i]['condition'] = 'True'
except:
raise RuntimeError(''.join([
('"condition" from the following rule in the file '
'"{}" could not be '
'evaluated; check the format '
'and try again: ').format(args.rules),
os.linesep, prettify_dict(rule)
])
)
# Check handles or keywords
if 'handles' not in rule and 'keywords' not in rule:
raise RuntimeError(''.join([
('A rule must have at least one of {{"handles", '
'"keywords"}}, but this rule from the file "{}" '
'doesn\'t:').format(args.rules),
os.linesep, prettify_dict(rule)
])
)
if 'handles' not in rule:
new_rules[i]['handles'] = []
if 'keywords' not in rule:
new_rules[i]['keywords'] = []
new_rules[i]['handles'] = [
handle.lower() for handle in new_rules[i]['handles']
]
new_rules[i]['keywords'] = [
keyword.lower() for keyword in new_rules[i]['keywords']
]
'''Validate order; follow https://docs.gdax.com/#orders for
filling in default values.'''
if 'orders' not in rule or not isinstance(rule['orders'], list):
raise RuntimeError(''.join([
('Every rule must have an "orders" list, but '
'this rule from the file "{}" doesn\'t:').format(
args.rules), os.linesep, prettify_dict(rule)
])
)
for j, order in enumerate(rule['orders']):
if not isinstance(order, dict):
raise RuntimeError(''.join([
('Every order must be a dictionary, but order #{} '
'from this rule in the file "{}" isn\'t:').format(
j+1, args.rules), os.linesep, prettify_dict(rule)]))
unrecognized_keys = [
key for key in order if key not in order_vocab
]
if unrecognized_keys:
raise RuntimeError(''.join([
'In the file "{}", the "order" key(s) '.format(
args.rules),
os.linesep, '[',
', '.join(unrecognized_keys), ']', os.linesep,
('are invalid yet present in order #{} of '
'the following rule:').format(j+1),
os.linesep, prettify_dict(rule)
]))
try:
if order['type'] not in [
'limit', 'market', 'stop'
]:
raise RuntimeError(''.join([
('An order\'s "type" must be one of {{"limit", '
'"market", "stop"}}, which order #{} in this '
'rule from the file "{}" doesn\'t '
'satisfy:').format(j+1, args.rules),
os.linesep, prettify_dict(rule)
]))
except KeyError:
# GDAX default is limit
new_rules[i]['orders'][j]['type'] = 'limit'
if 'side' not in order:
raise RuntimeError(''.join([
('An order must have a "side", but order #{} in '
'this rule from the file "{}" doesn\'t:').format(
j+1, args.rules), os.linesep, prettify_dict(rule)
])
)
if order['side'] not in ['buy', 'sell']:
raise RuntimeError(''.join([
('An order\'s "side" must be one of {{"buy", '
'"sell"}}, which order #{} in this rule '
'from the file "{}" doesn\'t satisfy:').format(
j+1, args.rules), os.linesep, prettify_dict(rule)
])
)
if 'product_id' not in order:
raise RuntimeError(''.join([
('An order must have a "product_id", but in the '
'file "{}", order #{} from this rule '
'doesn\'t:').format(args.rules, j+1),
os.linesep, prettify_dict(rule)
]))
if new_rules[i]['orders'][j]['type'] == 'limit':
for item in ['price', 'size']:
if item not in order:
raise RuntimeError(''.join([
('If an order\'s "type" is "limit", the order '
'must specify a "{}", but in the file "{}",'
'order #{} from this rule doesn\'t:').format(
item, args.rules, j+1),
os.linesep, prettify_dict(rule)
]))
elif new_rules[i]['orders'][j]['type'] in ['market', 'stop']:
if 'size' not in order and 'funds' not in order:
raise RuntimeError(''.join([
('If an order\'s "type" is "{}", the order '
'must have at least one of {{"size", '
'"funds"}}, but in file "{}", order #{} '
'of this rule doesn\'t:').format(
new_rules[i]['orders'][j]['type'],
args.rules, j+1
), os.linesep, prettify_dict(rule)]))
for stack in ['size', 'funds', 'price']:
try:
eval(order[stack].format(
tweet=('"The rain in Spain stays mainly '
'in the plain."'),
available={
'ETH' : .01,
'USD' : .01,
'LTC' : .01,
'BTC' : .01
}, inside_bid=200, inside_ask=200))
except KeyError:
pass
except Exception as e:
raise RuntimeError(''.join([
('"{}" from order #{} in the following '
'rule from the file "{}" could not be '
'evaluated; check the format '
'and try again:').format(
stack, j+1, args.rules
), os.linesep, prettify_dict(rule)]))
rules = new_rules
# Use _last_ entry in config file with profile name
key = None
try:
with open(os.path.join(key_dir, 'config'), 'rU') as config_stream:
line = config_stream.readline().rstrip('\n')
while line:
profile_name = line[1:-1]
if profile_name == args.profile:
salt = base64.b64decode(
config_stream.readline().rstrip(
'\n').partition(': ')[2]
)
if key is None:
key = KDF.PBKDF2(getpass.getpass(
'Enter password for profile "{}": '.format(
profile_name
)
), salt,
dkLen=32, count=_key_derivation_iterations
)
keys_and_secrets = []
for _ in range(7):
item, _, encoded = config_stream.readline().rstrip(
'\n').partition(': ')
if 'key' in item:
# Not actually encoded; remove leading space
keys_and_secrets.append(encoded)
continue
encoded = base64.b64decode(encoded)
cipher = AES.new(
key, AES.MODE_CFB,
encoded[:AES.block_size]
)
keys_and_secrets.append(
cipher.decrypt(
encoded
)[AES.block_size:]
)
else:
# Skip profile
for _ in range(8): config_stream.readline()
line = config_stream.readline().rstrip('\n')
except IOError as e:
e.message = (
'Cannot find vickitrix config file. Use '
'"vickitrix configure" to configure vickitrix '
'before trading.'
)
raise
try:
# Instantiate GDAX and Twitter clients
gdax_client = gdax.AuthenticatedClient(
*keys_and_secrets[:3]
)
# Are they working?
get_dough(gdax_client, status_update=True)
twitter_client = Twython(*keys_and_secrets[3:7])
trade_listener = TradeListener(
*([rules, gdax_client] + keys_and_secrets[3:7]),
sleep_time=args.sleep
)
except Exception as e:
from traceback import format_exc
print_to_screen(format_exc())
print_to_screen(''.join(
[os.linesep,
'Chances are, this opaque error happened because either ',
os.linesep,
'a) You entered incorrect security credentials '
'when you were configuring vickitrix.',
os.linesep,
'b) You entered the wrong password above.']
))
exit(1)
print_to_screen('Twitter/GDAX credentials verified.')
# Get all handles to monitor
handles, keywords = set(), set()
for rule in rules:
handles.update(rule['handles'])
keywords.update(rule['keywords'])
handles_to_user_ids = {}
for handle in handles:
try:
handles_to_user_ids[handle] = twitter_client.show_user(
screen_name=handle
)['id_str']
except TwythonError as e:
if 'User not found' in e.message:
print(
'Handle {} not found; skipping rule...'.format(handle)
)
else:
raise
if not handles_to_user_ids:
raise RuntimeError('No followable Twitter handles found in rules!')
while True:
print_to_screen('Listening for tweets; hit CTRL+C to quit...')
trade_listener.statuses.filter(
follow=handles_to_user_ids.values(),
track=list(keywords)
)
print_to_screen(
timestamp()
+ 'Rate limit error. Restarting in {} s...'.format(
args.interval
)
)
time.sleep(args.interval)
| [
"[email protected]"
]
| |
080728dd79f7bff9d345033a81fe4b83e3180222 | 4e47bb6c804150f8be2c7aee96718c8347765cf8 | /sample.py | bfd316339170a9eddb2844089423ec1a214dfd3a | []
| no_license | vvasuki/misc-python | 89955529b32bf32cf06ab726319a2ccbb6e6accb | 5d6d53bfec0dc1f85c24bb5e0cf6e2fcec31a389 | refs/heads/master | 2022-12-09T14:30:20.149062 | 2022-12-01T04:14:17 | 2022-12-01T04:14:17 | 149,946,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,495 | py | #! /usr/bin/python
#easy to use python documentation.. intended for reference and reuse of source code (sample code) slices.
#for help: install python-docs package.
#see this then: file:///usr/share/doc/python-docs-2.4.1/html/tut/tut.html
#to enter interactive mode, type: python
#to exit python shell: EOF character .. ^d
#you can set an environment variable named PYTHONSTARTUP to the name of a file containing your start-up commands.
#interpreter can act as a calculator
#arithmatic operators as in c.
#>>> width = 20
#>>> height = 5*9
#>>> width * height
#900
#9+_ #note underscore (implicit variable)
#909
#complex numbers too
#>>> 1j * 1J
#(-1+0j)
#>>> 1j * complex(0,1)
#(-1+0j)
#>>> a=1.5+0.5j
#>>> a.real
#1.5
#>>> a.imag #that is how you print in interactive mode.. directly quote the variable.
#0.5
#"python -c command [arg] ..."
#"python -m module [arg] ...", which executes the source file for module
#"python file" and "python <file" are different..
#in that the former gets input from stdin.
#sys.argv, a list of strings has the script name and additional arguments from shell.
#no arguments are given,
#sys.argv[0] is an empty string.
#When the script name is given as '-' (meaning standard input), sys.argv[0] is set to '-'.
#When -c command is used, sys.argv[0] is set to '-c'.
#When -m module is used, sys.argv[0] is set to the full name of the located module.
#There are six sequence types: strings, Unicode strings, lists, tuples, buffers, and xrange objects.
#lists are like: [a, b, c]
#tuples are like: a, b, c or () or (d,)
#Buffer objects are not directly supported by Python syntax, but can be created by calling the builtin function buffer().
#Xrange objects are similar to buffers in that there is no specific syntax to create them,
#but they are created using the xrange() function.
#general sequence operators:
#in, not in, +, *, s[i], s[i:j], s[i:j:k], len, min, max
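#a quick demo of the generic sequence operators listed above:
seqTmp = [1, 2, 3]
print 2 in seqTmp, seqTmp + [4], seqTmp * 2, seqTmp[0], seqTmp[0:2], len(seqTmp)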
lstTmp = [[]] * 3
#>>> lists
#[[], [], []]
#>>> lists[0].append(3)
#>>> lists
#[[3], [3], [3]]
lstTmp[0:2] = [] #removed elements.. size of list is changeable. elements are replaceable too.
#functions on lists:
#append extend insert remove(if the arg is matched) pop(can take args) index count sort reverse
#an inbuilt function to make list of numbers:
rngTmp=range(4)
rngTmp=range(2,8)
iTmp=1
iTmp,iTmp1=1,1
if iTmp:
#indentation is necessary for blocks in python
strTmp="iTmp is 1"
print strTmp, " ", iTmp
strTmp='yeah, both single and double quotes can encapsulate strings.\n\
yeah, note the continuation of the string into the next line.'
print strTmp
#any non-zero integer value is true; zero is false.
#The condition may also be a string or list value, in fact any sequence;
#anything with a non-zero length is true, empty sequences are false.
#comparison operators as in C.
strTmp=r'this is a raw string \
oye. it works thus.'
strTmp="""
another way of writing multiline strings.
"""
strTmp='''
yet another way of writing multiline strings.
'''
strTmp="""
look at this piece of string concatenation!
""" "oye. write them side by side.\n" + "or use the '+' sign\n"+ "muaddib "*5
print strTmp
#slice notation: strTmp[0], strTmp[2:5]
#strTmp[:5] and strTmp[0:5] are the same.
#>>> word[-1] # The last character.. from the right. a negative index is used.
#strTmp[0]='p' is not allowed.
#>>> 'x' + word[1:]
#'xelpA'
#is ok.
#degenerate slices are handled gracefully:
#word='HelpA'
#>>> word[1:100]
#'elpA'
#>>> word[10:]
#''
#>>> word[2:1]
#''
#>>> word[-100:]
#'HelpA'
#>>> word[-10] # error
ustrTmp= u' a unicode \u0020 string !'
#u'a unicode string !'
#the lower 256 characters of Unicode are the same as the 256 characters of Latin-1.
#Codecs available include Latin-1, ASCII, UTF-8, and UTF-16.
ustrTmp.encode('utf-8')
print ustrTmp
#string formatting options
strTmp="string formatting or interpolation operator %% is like %(familiarFunction)s" \
%{'familiarFunction':"sprintf()"}
print strTmp;
#the following options may be used in %(varName)[formatting]option:
# d i o u x X e E f F g G c %
# r s (for python objects, using repr and str functions)
#
#the following are string related functions:
#strip() len() capitalize() lower() swapcase() l/rjust() center() l/rstrip() title()
#join(sequenceOfStrings) [r]split(delimiter) splitlines()
#[r]find () count(substr[,start,end]) [r]index() translate(table[, deletechars])
#endswith() startswith()
#isalnum() isalpha() isdigit() islower() isspace() isupper() istitle()
#zfill()
#str(), unicode(), float(), int() and long() convert among datatypes
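#a few of the methods above in action:
print "  Spam  ".strip().lower(), "a-b-c".split("-"), str(42).zfill(5)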
#decision statements: if, else, elif
#looping:
#while looping: while a<b:
#for statement iterates over the items of any sequence: for x in ['cat', 'window', 'defenestrate']:
#iterate over a sequence of numbers: use for with range.
#looping constructs can have else clauses.
#break and continue are as in C.
def function(iTmp):
#reference to the argument is passed.
#default value may be optionally specified..
#it is the value evaluated at the time of making of the function object.
"this is the function's optional docstring"
print "oye, a function was defined here."
#global variables cannot be directly assigned a value within a function
#(unless named in a global statement), although they may be referenced.
#unless the function explicitly returns something,
#it returns None object.
if iTmp:
return [iTmp]
else:
return
print function.__doc__
#a function is actually an object in the global namespace too.
#function can be referenced only after it is defined... "interpreter".. remember?
print function
print function(0), function(1)
iTmp = 5
def function(arg=iTmp):
print arg
iTmp = 6
#default is evaluated only once. rest of the calls, it is shared...
#to be expected. for the default is filled in when the function object is created.
function() #printeth 5
def function(a, L=[]):
L.append(a)
return L #L has scope only within this here block
print function(1)
print function(2)
print function(3)
print function(1,[])
print function(3) #hehe. [1, 2, 3, 3]
#the above function behaved thusly because the default was a mutable object..
#not an immutable one.. like below.
def function(a, L=None):
if L is None:
L = []
L.append(a)
return L
#keyword arguments.
def function(arg1,arg2='ole',arg3='jo'):
pass #this is an empty statement.
print arg1
function(arg2=99, arg1=0231)
#any function can be called with its arguments packed in a sequence,
#unpacked at the call site with the * operator.
#as in any call, each element is passed as a reference to its object.
tupTmp=(0231,99)
function(*tupTmp)
#the * operator unpacks the tuple
#variable number of arguments may be passed as below.
#they may be passed in the form of a tuple of arguments, and
#also as a dictionary (hashtable) of arguments.
def function(arg, *argTuple, ** argDictionary):
#see how a for loop is used with a tuple
for argentum in argTuple: pass
#see how argDictionary is used, and notice the use of the dictionary method keys:
keynen = argDictionary.keys()
#see that the sequence keynen has a method called sort
keynen.sort()
function("sa","asdfa","sdf","asdff",
god="allah",
prophet="mohammed")
#lambda forms from Lisp.. functions used to make function objects
def function(arg):
return lambda argLm: arg+argLm
#Like nested function definitions, lambda forms can reference variables from the containing scope
fnTmp=function(strTmp)
print "lambda land ", fnTmp("sdf")
| [
"[email protected]"
]
| |
b40bac9713b087f67ca3260d194ce949da4c8dae | 73a0f661f1423d63e86489d4b2673f0103698aab | /python/oneflow/nn/modules/math_ops.py | a16ddf4555f82be980156024d8fa893e24247691 | [
"Apache-2.0"
]
| permissive | Oneflow-Inc/oneflow | 4fc3e081e45db0242a465c4330d8bcc8b21ee924 | 0aab78ea24d4b1c784c30c57d33ec69fe5605e4a | refs/heads/master | 2023-08-25T16:58:30.576596 | 2023-08-22T14:15:46 | 2023-08-22T14:15:46 | 81,634,683 | 5,495 | 786 | Apache-2.0 | 2023-09-14T09:44:31 | 2017-02-11T06:09:53 | C++ | UTF-8 | Python | false | false | 7,845 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Optional, Sequence, Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.modules.module import Module
from oneflow.nn.modules.utils import _check_axis
from oneflow.ops.transpose_util import (
get_inversed_perm,
get_perm_when_transpose_axis_to_last_dim,
)
def asin_op(input):
"""
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\sin^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32)
>>> output = flow.asin(input)
>>> output.shape
oneflow.Size([4])
>>> output
tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32)
>>> output1 = input1.asin()
>>> output1.shape
oneflow.Size([2, 2])
>>> output1
tensor([[ 0.9273, 1.5708],
[-0.6435, -1.5708]], dtype=oneflow.float32)
"""
return flow._C.asin(input)
def arcsin_op(input):
"""
Alias for :func:`oneflow.asin`
"""
return flow._C.asin(input)
def asinh_op(input):
"""
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\sinh^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([2, 3, 4]), dtype=flow.float32)
>>> output = flow.asinh(input)
>>> output.shape
oneflow.Size([3])
>>> output
tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32)
>>> output1 = input1.asinh()
>>> output1.shape
oneflow.Size([2, 3])
>>> output1
tensor([[-0.8814, 0.0000, -0.3900],
[ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32)
"""
return flow._C.asinh(input)
def arcsinh_op(input):
"""
Alias for :func:`oneflow.asinh`
"""
return flow._C.asinh(input)
def asinh_op_tensor(input):
"""
See :func:`oneflow.asinh`
"""
return flow._C.asinh(input)
def inplace_sin_op_tensor(input):
"""
In-place version of :func:`oneflow.sin`
"""
return flow._C.sin_(input)
def atan_op(input):
"""
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\\text{out}_{i} = \\tan^{-1}(\\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32)
>>> output = flow.atan(input)
>>> output.shape
oneflow.Size([3])
"""
return flow._C.atan(input)
def arctan_op(input):
"""
Alias for :func:`oneflow.atan`
"""
return flow._C.atan(input)
def fmod_op(input, other):
"""
fmod(input, other, *, out=None) -> Tensor
Computes the element-wise remainder of division.
The dividend and divisor may contain both for integer and floating point
numbers. The remainder has the same sign as the dividend :attr:`input`.
Supports broadcasting to a common shape, integer and float inputs.
Args:
input (Tensor): the dividend
other (Tensor or Scalar): the divisor
Keyword args:
out (Tensor, optional): the output tensor.
Example::
>>> import oneflow as flow
>>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3]), 2.)
tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32)
>>> flow.fmod(flow.tensor([1, 2, 3, 4, 5.]), 1.5)
tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000], dtype=oneflow.float32)
>>> flow.fmod(flow.tensor([1, 2, 3, 4., -5]), flow.tensor([4, 2, 1, 3., 1]))
tensor([1., 0., 0., 1., -0.], dtype=oneflow.float32)
"""
return flow._C.fmod(input, other)
def addmm(x, mat1, mat2, alpha=1, beta=1):
if len(x.shape) > 2 or len(mat1.shape) > 2 or len(mat2.shape) > 2:
raise ValueError("input matrixes shape can not be greater than 2")
else:
return flow.mul(x, beta) + flow.mul(flow._C.matmul(mat1, mat2), alpha)
def addmm_op(input, mat1, mat2, alpha=1, beta=1):
"""addmm(beta=1, input, alpha=1, mat1, mat2, out=None) -> Tensor
Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
The matrix :attr:`input` is added to the final result.
If :attr:`mat1` is a :math:`(n \\times m)` tensor, :attr:`mat2` is a
:math:`(m \\times p)` tensor, then :attr:`input` must be
broadcastable with a :math:`(n \\times p)` tensor
and :attr:`out` will be a :math:`(n \\times p)` tensor.
:attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
:attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
.. math::
\\text{out} = \\beta\\ \\text{input} + \\alpha\\ (\\text{mat1}_i \\mathbin{@} \\text{mat2}_i)
For inputs of type `float` or `double`, arguments :attr:`beta` and
:attr:`alpha` must be real numbers, otherwise they should be integers.
Args:
beta (Number, optional): multiplier for :attr:`input` (:math:`\\beta`)
input (Tensor): matrix to be added
alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\\alpha`)
mat1 (Tensor): the first matrix to be multiplied
mat2 (Tensor): the second matrix to be multiplied
out (Tensor, optional): the output tensor.
For example:
>>> import numpy as np
>>> import oneflow as flow
>>> input = flow.tensor(np.array([[1,2,4],[5,11,9.1]]))
>>> mat1 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5]]))
>>> mat2 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5],[3.7,2.2,8.1]]))
>>> output = flow.addmm(input, mat1, mat2)
>>> output
tensor([[100.6800, 33.8300, 126.8700],
[110.0100, 43.4800, 133.6100]], dtype=oneflow.float64)
>>> output.shape
oneflow.Size([2, 3])
>>> input2 = flow.tensor(np.array([1.7]))
>>> mat1 = flow.tensor(np.array([[1,2],[5,9.1],[7.7,1.4]]))
>>> mat2 = flow.tensor(np.array([[1,2,3.7],[5,9.1,6.8]]))
>>> output2 = flow.addmm(input2, mat1, mat2, alpha=1, beta=2)
>>> output2
tensor([[14.4000, 23.6000, 20.7000],
[53.9000, 96.2100, 83.7800],
[18.1000, 31.5400, 41.4100]], dtype=oneflow.float64)
>>> output2.shape
oneflow.Size([3, 3])
"""
return addmm(input, mat1, mat2, alpha, beta)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"[email protected]"
]
| |
6697e58f58dc6dc054679c72808f91d06415102d | 88ea7bf2bbc8ffba551e881df553ae5ceac70dd6 | /deblock/codes/models/archs/archs_sub/SRResNet_o2m_spectral_arch.py | 83e92751b2e71f264c06bc251d1ed9cc9b2e4680 | [
"Apache-2.0"
]
| permissive | zhouhuanxiang/repo-zhx | 2d1135bb2f925e051e1b0bcfc2ed53fb34ea51c5 | 76b577eea13130c60bf7bff8c486f51766128661 | refs/heads/main | 2023-06-10T02:56:17.978649 | 2021-06-29T02:35:57 | 2021-06-29T02:35:57 | 381,213,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,389 | py | import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.archs.arch_util as arch_util
class ResidualBlock_Spectral_withZ(nn.Module):
'''Residual block w/o BN
---Conv-ReLU-Conv-+-
|________________|
'''
def __init__(self, ni=65, no=64):
super(ResidualBlock_Spectral_withZ, self).__init__()
self.conv1 = nn.utils.spectral_norm(nn.Conv2d(ni, ni, 3, 1, 1, bias=True))
self.conv2 = nn.utils.spectral_norm(nn.Conv2d(ni, no, 3, 1, 1, bias=True))
# initialization
arch_util.initialize_weights([self.conv1, self.conv2], 0.1)
def forward(self, x):
identity = x
out = F.relu(self.conv1(x), inplace=True)
out = self.conv2(out)
return identity[:, :out.shape[1], :, :] + out
class MSRResNet(nn.Module):
''' modified SRResNet'''
def __init__(self, in_nc=3, out_nc=3, nf=64, nb=16, upscale=4):
super(MSRResNet, self).__init__()
self.upscale = upscale
self.conv_first = nn.Conv2d(in_nc, nf, 3, 1, 1, bias=True)
# basic_block = functools.partial(ResidualBlock_noBN_withZ, nf=nf)
# self.recon_trunk = arch_util.make_layer(basic_block, nb)
self.recon_trunk = nn.ModuleList([ResidualBlock_Spectral_withZ(nf + 1, nf) for i in range(nb)])
        # reconstruction head (plain convolutions; note there is no pixel-shuffle upsampling)
self.upconv1 = nn.Conv2d(nf + 1, nf, 3, 1, 1, bias=True)
self.HRconv = nn.Conv2d(nf + 1, nf, 3, 1, 1, bias=True)
self.conv_last = nn.Conv2d(nf, out_nc, 3, 1, 1, bias=True)
# activation function
self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
# initialization
arch_util.initialize_weights([self.conv_first, self.upconv1, self.HRconv, self.conv_last],
0.1)
def forward(self, x, z):
out = self.lrelu(self.conv_first(x))
# out = self.recon_trunk(fea)
for layer in self.recon_trunk:
out = layer(torch.cat((out, z), dim=1))
out = self.lrelu(self.upconv1(torch.cat((out, z), dim=1)))
out = self.conv_last(self.lrelu(self.HRconv(torch.cat((out, z), dim=1))))
base = F.interpolate(x, scale_factor=self.upscale, mode='bilinear', align_corners=False)
if out.shape[1] == base.shape[1]:
out += base
else:
out += base[:, :3, :, :]
return out
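# A minimal usage sketch (not part of the original file). The forward pass
# above has no pixel-shuffle, so the residual base only matches spatially
# when upscale=1 (consistent with a deblocking use-case); z is assumed to
# be a single-channel latent map with the same H and W as x, as implied by
# the torch.cat((out, z), dim=1) calls.
if __name__ == "__main__":
    net = MSRResNet(in_nc=3, out_nc=3, nf=64, nb=16, upscale=1)
    x = torch.randn(1, 3, 32, 32)   # degraded input frame
    z = torch.randn(1, 1, 32, 32)   # per-pixel latent code
    y = net(x, z)
    print(y.shape)                  # torch.Size([1, 3, 32, 32])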
| [
"[email protected]"
]
| |
9269f28f522d0d9b3083bf0059d3b6ed41848195 | d67ae1b2f20d96b7e36c82c3a298882042c951c5 | /src/asyncf.py | 05a406b445e0dbcbd7eb0341c1360003b928bcfe | [
"MIT"
]
| permissive | Vistaus/my-weather-indicator | 8a99e69fd9d2c03ab5cca578a89da38d6676a5ab | 32aaa77a14cf2f85edbfb72c45d154e1676abe83 | refs/heads/master | 2021-01-02T12:00:00.506304 | 2020-02-11T19:42:47 | 2020-02-11T19:42:47 | 239,614,123 | 0 | 0 | MIT | 2020-02-10T21:11:07 | 2020-02-10T21:11:06 | null | UTF-8 | Python | false | false | 2,549 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# asyncf.py
#
# This file is part of uPodcatcher
#
# Copyright (C) 2014
# Lorenzo Carbonell Cerezo <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gi
try:
gi.require_version('GLib', '2.0')
except Exception as e:
print(e)
exit(1)
from gi.repository import GLib
import threading
import traceback
__all__ = ['async_function']
def _async_call(f, args, kwargs, on_done):
def run(data):
f, args, kwargs, on_done = data
error = None
result = None
try:
result = f(*args, **kwargs)
except Exception as e:
e.traceback = traceback.format_exc()
            error = 'Unhandled exception in async call:\n{}'.format(e.traceback)
GLib.idle_add(lambda: on_done(result, error))
data = f, args, kwargs, on_done
thread = threading.Thread(target=run, args=(data,))
thread.daemon = True
thread.start()
def async_function(on_done=None):
'''
A decorator that can be used on free functions so they will always be
called asynchronously. The decorated function should not use any resources
shared by the main thread.
Example:
def do_async_stuff(self, input_string):
def on_async_done(result, error):
# Do stuff with the result and handle errors in the main thread.
if error:
print(error)
elif result:
print(result)
@async_function(on_done=on_async_done)
def do_expensive_stuff_in_thread(input_string):
# Pretend to do expensive stuff...
time.sleep(10)
stuff = input_string + ' Done in a different thread'
return stuff
do_expensive_stuff_in_thread(input_string)
'''
def wrapper(f):
def run(*args, **kwargs):
_async_call(f, args, kwargs, on_done)
return run
return wrapper
| [
"[email protected]"
]
| |
1a43fcbec667b510a0a1ff82df246326a83a70fb | eefb06b0d8c8c98c1e9cfc4c3852d5c453eb5429 | /data/input/andersbll/deeppy/deeppy/model/__init__.py | 3fc2414a36c1575b1ca19d8106a70e4a76258fb6 | []
| no_license | bopopescu/pythonanalyzer | db839453bde13bf9157b76e54735f11c2262593a | 8390a0139137574ab237b3ff5fe8ea61e8a0b76b | refs/heads/master | 2022-11-22T02:13:52.949119 | 2019-05-07T18:42:52 | 2019-05-07T18:42:52 | 282,079,884 | 0 | 0 | null | 2020-07-23T23:46:09 | 2020-07-23T23:46:08 | null | UTF-8 | Python | false | false | 169 | py | from .adversarial import AdversarialNet
from .feedforward import FeedForwardNet, ClassifierNet, RegressorNet
from .variational_autoencoder import VariationalAutoencoder
| [
"[email protected]"
]
| |
327ad346e94f6d6d3c3be000b3c703d8718a101f | 784dda4c400d4e5c42f57e9a7d48883692b2a931 | /pyhawkes/utils/data_manager.py | 3dc6d25d07e8bd12b7188d28e252be3626d20442 | [
"MIT"
]
| permissive | yxtj/pyhawkes | bd942aded06dba3dd390a47e28702dcba124961b | ecc6dc23e516a7b06d64e5dbd10c8861b01bd955 | refs/heads/master | 2021-01-15T18:31:12.539149 | 2015-02-25T23:35:50 | 2015-02-25T23:35:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,746 | py | """
Data manager handles loading the .mat file and setting up the data on the GPU
This could be extended if we ever moved to a distributed setup with multiple GPUs
"""
import numpy as np
import scipy.sparse as sparse
import scipy.io
import os
import pycuda.autoinit
import pycuda.compiler as nvcc
import pycuda.driver as cuda
import pycuda.gpuarray as gpuarray
import pycuda.curandom as curandom
from pyhawkes.utils.utils import *
# Define constant for the sparse matrix preprocessing
G_LOGISTIC_NORMAL = 0
import logging
# Get handle to global logger
log = logging.getLogger("global_log")
class GpuData:
"""
Inner class to store pointers on the GPU
"""
def __init__(self):
self.Ns = None
self.cumSumNs = None
self.X = None
class DataSet:
"""
Wrapper for a spike data set
"""
def __init__(self):
self.gpu = GpuData()
def loadFromFile(self, path, sortByBlock=False):
"""
Load the specified mat file
"""
mat_data = scipy.io.loadmat(path, appendmat=True)
self.N = int(mat_data["N"])
if "Tstart" in mat_data.keys() and "Tstop" in mat_data.keys():
self.Tstart = float(mat_data["Tstart"])
self.Tstop = float(mat_data["Tstop"])
elif "T" in mat_data.keys():
self.Tstart = 0
self.Tstop = float(mat_data["T"])
else:
log.error("Neither (Tstart,Tstop) nor T were specified in the mat file")
exit()
Sraw = np.ravel(mat_data["S"]).astype(np.float32)
# Some datasets do not have process IDs
if "K" in mat_data.keys() and"C" in mat_data.keys():
self.proc_ids_known = True
self.K = int(mat_data["K"])
Craw = (np.ravel(mat_data["C"])).astype(np.int32)
# Make sure the process IDs are 0-based
if np.max(Craw)==self.K and np.min(Craw)==1:
# The data file is 1-indexed (i.e. generated in Matlab most likely
Craw = Craw -1
else:
# Default to all spikes on the same process. This will be changed
# during inference
self.proc_ids_known = False
self.K = 1
Craw = np.zeros((self.N,), dtype=np.int32)
# Some datasets have associated spatial locations for each spike
# If so, X must be a DxN matrix where D is the dimension of the spatial data
if "X" in mat_data.keys():
self.isspatial = True
Xraw = mat_data["X"].astype(np.float32)
# Make sure Xraw is a DxN matrix
if np.size(Xraw,0)==self.N:
log.debug("Given X is NxD rather than DxN. Transposing...")
Xraw = Xraw.T
self.D = np.size(Xraw,0)
else:
self.isspatial = False
self.X = None
self.D = 0
if not sortByBlock:
(I, Ns, cumSumNs) = self.__argsortSCArray(self.K, Sraw, Craw)
else:
(I, Ns, cumSumNs) = self.__argsortSCArrayByBlock(self.K, Sraw, Craw)
self.S = Sraw[I]
self.C = Craw[I]
if self.isspatial:
# Slicing with I changes the view and orders as if it were NxD matrix
self.X = np.zeros((self.D,self.N), dtype=np.float32)
for n in np.arange(self.N):
self.X[:,n] = Xraw[:,I[n]]
self.Ns = Ns
self.maxNs = np.max(Ns)
self.cumSumNs = cumSumNs
# Store remaining keys
self.other_data = {}
for key in mat_data.keys():
if key not in ["S","K","C","T","N","X","D"]:
self.other_data[key] = mat_data[key]
self.__initializeGpuArrays()
def loadFromArray(self,N,K,Tstart,Tstop,S,C,X=None,D=0,other_data={},proc_ids_known=True, sortByBlock=False):
"""
Initialize a DataSet object with the given parameters
"""
self.N = N
self.K = K
self.Tstart = Tstart
self.Tstop = Tstop
self.other_data = other_data
self.proc_ids_known = proc_ids_known
        self.isspatial = X is not None  # '!=' on an ndarray would compare elementwise
self.D = D
self.X = None
if N == 0:
self.S = S
self.C = C
self.Ns = np.zeros(K)
return
# Make sure the process IDs are 0-based
if np.max(C)==self.K and np.min(C)==1:
# The data file is 1-indexed (i.e. generated in Matlab most likely
C = C -1
if not sortByBlock:
(I, Ns, cumSumNs) = self.__argsortSCArray(self.K, S, C)
else:
(I, Ns, cumSumNs) = self.__argsortSCArrayByBlock(self.K, S, C)
self.S = S[I]
self.C = C[I]
if self.isspatial:
# Slicing with I changes the view and orders as if it were NxD matrix
self.X = np.zeros((self.D,self.N), dtype=np.float32)
for n in np.arange(self.N):
self.X[:,n] = X[:,I[n]]
self.Ns = Ns
self.maxNs = np.max(Ns)
self.cumSumNs = cumSumNs
# Set correct types
self.S = np.float32(self.S)
self.C = np.int32(self.C)
self.Ns = np.int32(self.Ns)
self.N = int(self.N)
self.K = int(self.K)
self.D = int(self.D)
self.X = np.float32(self.X)
self.__initializeGpuArrays()
def __initializeGpuArrays(self):
"""
Add a dictionary of GPU pointers
"""
self.gpu.Ns = gpuarray.to_gpu(self.Ns.astype(np.int32))
self.gpu.cumSumNs = gpuarray.to_gpu(self.cumSumNs.astype(np.int32))
if self.isspatial:
# self.gpu.X = gpuarray.empty((self.D,self.N), dtype=np.float32)
# self.gpu.X.set(self.X.astype(np.float32))
self.gpu.X = gpuarray.to_gpu(self.X.astype(np.float32))
def __argsortSCArray(self,K,S,C):
"""
Sort an array of spikes, first by their processes, then by their spike times.
We assume S is already sorted but C is not.
"""
# Keep an array of spike counts
Ns = np.zeros(K, dtype=np.int32)
N = np.size(S)
assert np.size(C) == N, "ERROR: Size of S and C do not match!"
# Compute a permutation of S,C,X such that S is sorted in increasing order
Iflat = np.argsort(S)
# Compute Ns
for k in np.arange(K):
Ns[k] = np.count_nonzero(C==k)
# Also compute the cumulative sum of Ns
cumSumNs = np.cumsum(np.hstack(([0], Ns)), dtype=np.int32)
return (Iflat, Ns, cumSumNs)
def __argsortSCArrayByBlock(self,K,S,C):
"""
Sort an array of spikes, first by their processes, then by their spike times.
We assume S is already sorted but C is not.
"""
# Keep an array of spike counts
Ns = np.zeros(K, dtype=np.int32)
N = np.size(S)
assert np.size(C) == N, "ERROR: Size of S and C do not match!"
# Initialize buffers to hold the per-process indices
ppI = {}
buff_sz = int(2*N/K)
for k in np.arange(K):
ppI[k] = np.zeros(buff_sz)
for n in np.arange(N):
cn = C[n]
try:
ppI[cn][Ns[cn]] = n
except:
# Index out of bounds -- grow buffer
ppI[cn] = np.hstack((ppI[cn], np.zeros(buff_sz)))
ppI[cn][Ns[cn]] = n
Ns[cn] += 1
# Flatten the permutation
Iflat = np.zeros(N, dtype=np.int)
off = 0
for k in np.arange(K):
Iflat[off:off+Ns[k]] = ppI[k][:Ns[k]]
off += Ns[k]
# Also compute the cumulative sum of Ns
cumSumNs = np.cumsum(np.hstack(([0], Ns)), dtype=np.int32)
return (Iflat, Ns, cumSumNs)
class DataManager:
def __init__(self, configFile, dataFile=None):
"""
Load the data and preprocess it on the GPU.
"""
self.parse_config_file(configFile)
if not dataFile is None:
self.params["data_file"] = dataFile
pprint_dict(self.params, "Data Manager Params")
def preprocess_for_inference(self, sortByBlock=False):
"""
Load all of the data
"""
data = DataSet()
mat_file = os.path.join(self.params["data_dir"], self.params["data_file"])
data.loadFromFile(mat_file, sortByBlock=sortByBlock)
return data
def preprocess_for_cross_validation(self, sortByBlock=False):
"""
Load all of the data
"""
data = DataSet()
mat_file = os.path.join(self.params["data_dir"], self.params["xv_file"])
data.loadFromFile(mat_file, sortByBlock=sortByBlock)
return data
def preprocess_for_prediction_test(self, Tsplit=0, trainFrac=0.9, sortByBlock=False):
"""
Load all of the data onto the GPU for parameter inference
"""
data = DataSet()
mat_file = os.path.join(self.params["data_dir"], self.params["data_file"])
data.loadFromFile(mat_file)
(trainData, testData) = self.split_test_train_data(data, Tsplit, trainFrac, sortByBlock=sortByBlock)
log.info("Train: %d spikes in time [%.2f,%.2f]", trainData.N, trainData.Tstart,trainData.Tstop)
log.info("Test: %d spikes in time [%.2f,%.2f]", testData.N, testData.Tstart,testData.Tstop)
return (trainData, testData)
def parse_config_file(self, configFile):
"""
Parse the config file for data manager params
"""
# Initialize defaults
defaultParams = {}
# Data location
defaultParams["data_dir"] = "."
defaultParams["xv_file"] = "not given"
# CUDA kernels are defined externally in a .cu file
defaultParams["cu_dir"] = os.path.join("pyhawkes", "cuda", "cpp")
defaultParams["cu_file"] = "preprocessing_unknown_procs.cu"
# Block size
defaultParams["blockSz"] = 1024
# Window the data such that only spikes within a fixed time window can
# have an effect. It is important that this be consistent with the
# prior on the impulse response
defaultParams["dt_max"] = 5.0
defaultParams["max_hist"] = 10*1024
# Create a config parser object and read in the file
cfgParser = ConfigParser(defaultParams)
cfgParser.read(configFile)
# Create an output params dict. The config file is organized into
# sections. Read them one at a time
self.params = {}
self.params["data_dir"] = cfgParser.get("io", "data_dir")
self.params["data_file"] = cfgParser.get("io", "data_file")
self.params["xv_file"] = cfgParser.get("io", "xv_file")
self.params["blockSz"] = cfgParser.getint("cuda", "blockSz")
self.params["cu_dir"] = cfgParser.get("preprocessing", "cu_dir")
self.params["cu_file"] = cfgParser.get("preprocessing", "cu_file")
self.params["dt_max"] = cfgParser.getfloat("preprocessing", "dt_max")
self.params["max_hist"] = cfgParser.getint("preprocessing", "max_hist")
def initialize_gpu_kernels(self):
kernelSrc = os.path.join(self.params["cu_dir"], self.params["cu_file"])
kernelNames = ["computeColumnSizes",
"computeRowIndicesAndDs",
"computeDx"]
src_consts = {"B" : self.params["blockSz"]}
self.gpuKernels = compile_kernels(kernelSrc, kernelNames, srcParams=src_consts)
def initialize_known_proc_gpu_kernels(self):
kernelSrc = os.path.join(self.params["cu_dir"], self.params["cu_file"])
kernelNames = ["computeColPtrs",
"computeDsBufferSize",
"computeRowAndDsOffsets",
"computeRowIndicesAndDs",
"computeColumnSizes",
"computeRowIndicesAndDs"]
src_consts = {"B" : self.params["blockSz"]}
self.gpuKernels = compile_kernels(kernelSrc, kernelNames, srcParams=src_consts)
def split_test_train_data(self, alldata, Tsplit=0, trainFrac=0.9, sortByBlock=False):
"""
Split the data into test and train subsets
alldata must be a sorted Dataset
"""
# First make sure the spike are sorted by time, not by block
# Compute a permutation of S,C,X such that S is sorted in increasing order
Iflat = np.argsort(alldata.S)
S = alldata.S[Iflat]
C = alldata.C[Iflat]
        X = alldata.X[:,Iflat] if alldata.X is not None else None
if Tsplit > 0:
# Find the index of the first spike after Tsplit
split_ind = np.min(np.nonzero(S>Tsplit)[0])
elif trainFrac > 0:
split_ind = int(np.floor(trainFrac*alldata.N))
Tsplit = (S[split_ind-1] + S[split_ind])/2.0
else:
log.error("Either Tsplit or trainFrac must be specified!")
exit()
# Create two datasets
trainData = self.get_data_in_interval(alldata,(0,Tsplit), sortByBlock=sortByBlock)
testData = self.get_data_in_interval(alldata,(Tsplit, alldata.T), sortByBlock=sortByBlock)
return (trainData, testData)
def get_data_in_interval(self, alldata, (T_start,T_stop), sortByBlock=False):
"""
Split the data into test and train subsets
alldata must be a sorted Dataset
"""
# First make sure the spike are sorted by time, not by block
# Compute a permutation of S,C,X such that S is sorted in increasing order
Iflat = np.argsort(alldata.S)
S = alldata.S[Iflat]
C = alldata.C[Iflat]
        X = alldata.X[:,Iflat] if alldata.X is not None else None
# Find the index of the first spike after Tsplit
start_ind = np.min(np.nonzero(S>T_start)[0])
stop_ind = np.max(np.nonzero(S<T_stop)[0])+1
# Create two datasets
data = DataSet()
data.loadFromArray(stop_ind-start_ind,
alldata.K,
T_start,
T_stop,
S[start_ind:stop_ind],
C[start_ind:stop_ind],
                            X=X[:,start_ind:stop_ind] if X is not None else None,
D=alldata.D,
other_data=alldata.other_data,
proc_ids_known=alldata.proc_ids_known,
sortByBlock=sortByBlock)
return data
def compute_sparse_spike_intvl_matrices(self, dataSet1, dataSet2):
"""
preprocess the given datasets by computing the intervals between spikes on S1
and spikes on S2 and storing them in a sparse matrix format on the GPU.
The GPU kernels require the spikes to be sorted, first in C and then in S, so
all the spikes on process 0 come first, and within the spikes on process 0
they are sorted in increasing order of S.
"""
# Initialize the kernels with the size of the dataset
self.initialize_known_proc_gpu_kernels()
# Temporarily copy both sets of spike times to the GPU
S1_gpu = gpuarray.to_gpu(dataSet1.S.astype(np.float32))
S2_gpu = gpuarray.to_gpu(dataSet2.S.astype(np.float32))
# Now we can preprocess the interspike intervals on the GPU
# First compute the size of each column for each matrix
# Each spike appears in K1 matrices, so there are K1*N2 columns
colStartBuffer_gpu = gpuarray.empty((dataSet1.K,dataSet2.N), dtype=np.int32)
colEndBuffer_gpu = gpuarray.empty((dataSet1.K,dataSet2.N), dtype=np.int32)
colSizesBuffer_gpu = gpuarray.empty((dataSet1.K,dataSet2.N), dtype=np.int32)
grid_w = int(np.ceil(float(dataSet2.N)/self.params["blockSz"]))
status_gpu = gpuarray.zeros((dataSet1.K,grid_w),dtype=np.int32)
self.gpuKernels["computeColumnSizes"](np.float32(self.params["dt_max"]),
dataSet1.gpu.Ns.gpudata,
dataSet1.gpu.cumSumNs.gpudata,
S1_gpu.gpudata,
np.int32(dataSet2.N),
S2_gpu.gpudata,
colStartBuffer_gpu.gpudata,
colEndBuffer_gpu.gpudata,
colSizesBuffer_gpu.gpudata,
status_gpu.gpudata,
block=(1024,1,1),
grid=(grid_w,dataSet1.K)
)
# Compute the column pointers (the cumulative sum) of the
# column sizes for each matrix. There are K1xK2 grid of matrices
colPtrsBuffer_gpu = gpuarray.zeros((dataSet1.K,(dataSet2.N+dataSet2.K)), dtype=np.int32)
colPtrOffsets_gpu = gpuarray.zeros((dataSet1.K,dataSet2.K), dtype=np.int32)
self.gpuKernels["computeColPtrs"](np.int32(dataSet1.K),
np.int32(dataSet2.N),
dataSet2.gpu.Ns.gpudata,
dataSet2.gpu.cumSumNs.gpudata,
colSizesBuffer_gpu.gpudata,
colPtrsBuffer_gpu.gpudata,
colPtrOffsets_gpu.gpudata,
block=(1,1,1),
grid=(dataSet1.K,dataSet2.K)
)
# Compute the required size of the data and row buffer
bufferSize_gpu = gpuarray.zeros(1, dtype=np.int32)
self.gpuKernels["computeDsBufferSize"](np.int32(dataSet1.K),
dataSet2.gpu.Ns.gpudata,
colPtrsBuffer_gpu.gpudata,
colPtrOffsets_gpu.gpudata,
bufferSize_gpu.gpudata,
block=(1,1,1),
grid=(1,1)
)
bufferSize = int(bufferSize_gpu.get()[0])
log.debug("dS has %d nonzero entries" % bufferSize)
dsBuffer_gpu = gpuarray.empty((bufferSize,), dtype=np.float32)
rowIndicesBuffer_gpu = gpuarray.zeros((bufferSize,), dtype=np.int32)
# Compute the offsets into these buffers for each matrix
rowAndDsOffsets_gpu = gpuarray.empty((dataSet1.K,dataSet2.K), dtype=np.int32)
self.gpuKernels["computeRowAndDsOffsets"](np.int32(dataSet1.K),
dataSet2.gpu.Ns.gpudata,
colPtrsBuffer_gpu.gpudata,
colPtrOffsets_gpu.gpudata,
rowAndDsOffsets_gpu.gpudata,
block=(1,1,1),
grid=(1,1)
)
# Now we can actually fill in row and ds buffers
self.gpuKernels["computeRowIndicesAndDs"](np.int32(G_LOGISTIC_NORMAL),
np.int32(dataSet1.K),
dataSet1.gpu.Ns.gpudata,
dataSet1.gpu.cumSumNs.gpudata,
S1_gpu.gpudata,
np.int32(dataSet2.N),
dataSet2.gpu.cumSumNs.gpudata,
S2_gpu.gpudata,
colStartBuffer_gpu.gpudata,
colEndBuffer_gpu.gpudata,
colPtrsBuffer_gpu.gpudata,
colPtrOffsets_gpu.gpudata,
rowIndicesBuffer_gpu.gpudata,
dsBuffer_gpu.gpudata,
rowAndDsOffsets_gpu.gpudata,
block=(1024,1,1),
grid=(grid_w,dataSet1.K)
)
# If this is a spatial dataset then also compute dX
dxBuffer_gpu = None
if dataSet1.isspatial and dataSet2.isspatial:
D = dataSet1.D
assert dataSet2.D == D, "Error: two datasets have different spatial dimensions"
dxBuffer_gpu = gpuarray.empty((D*bufferSize,), dtype=np.float32)
# Copy the spatial data to the GPU
X1_gpu = gpuarray.to_gpu(dataSet1.X.astype(np.float32))
X2_gpu = gpuarray.to_gpu(dataSet2.X.astype(np.float32))
self.gpuKernels["computeDx"](np.int32(D),
np.int32(dataSet1.N),
dataSet1.gpu.cumSumNs.gpudata,
X1_gpu.gpudata,
np.int32(dataSet2.N),
dataSet2.gpu.cumSumNs.gpudata,
X2_gpu.gpudata,
rowIndicesBuffer_gpu.gpudata,
colPtrsBuffer_gpu.gpudata,
colPtrOffsets_gpu.gpudata,
rowAndDsOffsets_gpu.gpudata,
dxBuffer_gpu.gpudata,
block=(1024,1,1),
grid=(grid_w,dataSet1.K)
)
ds = dsBuffer_gpu.get()
# assert np.all(ds < self.params["dt_max"]), "ERROR: DS contains entries equal to dt_max!"
# assert np.all(ds > 0), "ERROR: DS contains entries equal to 0!"
# Update gpuData dictionary
gpuData = {}
gpuData["dsBuffer_size"] = bufferSize
gpuData["dsBuffer_gpu"] = dsBuffer_gpu
gpuData["rowIndicesBuffer_gpu"] = rowIndicesBuffer_gpu
gpuData["colPtrsBuffer_gpu"] = colPtrsBuffer_gpu
gpuData["rowAndDsOffsets_gpu"] = rowAndDsOffsets_gpu
gpuData["colPtrOffsets_gpu"] = colPtrOffsets_gpu
gpuData["dxBuffer_gpu"] = dxBuffer_gpu
return gpuData
def compute_sparse_spike_intvl_matrix_unknown_procs(self, S1, S2):
"""
In the case where the process identities are unknown and to be inferred,
it does not make sense to have a grid of sparse matrices for each pair of
process identities. Instead, create a single sparse matrix for spike intervals
"""
# Initialize the kernels with the size of the dataset
self.initialize_gpu_kernels()
# Temporarily copy both sets of spike times to the GPU
N1 = len(S1)
N2 = len(S2)
# Handle the case where there are no spikes, N2=0
if N2 == 0:
gpuData = {}
gpuData["dS_size"] = 0
gpuData["dS"] = gpuarray.zeros(1, dtype=np.float32)
gpuData["rowIndices"] = gpuarray.zeros(1, dtype=np.float32)
gpuData["colPtrs"] = gpuarray.zeros(1, dtype=np.float32)
return gpuData
S1_gpu = gpuarray.to_gpu(S1.astype(np.float32))
S2_gpu = gpuarray.to_gpu(S2.astype(np.float32))
# Now we can preprocess the interspike intervals on the GPU
# First compute the size of each column for each matrix
# Each spike appears in K1 matrices, so there are K1*N2 columns
colStart_gpu = gpuarray.empty((N2,), dtype=np.int32)
colEnd_gpu = gpuarray.empty((N2,), dtype=np.int32)
colSizes_gpu = gpuarray.empty((N2,), dtype=np.int32)
grid_w = int(np.ceil(float(N2)/self.params["blockSz"]))
self.gpuKernels["computeColumnSizes"](np.float32(self.params["dt_max"]),
np.int32(N1),
S1_gpu.gpudata,
np.int32(N2),
S2_gpu.gpudata,
colStart_gpu.gpudata,
colEnd_gpu.gpudata,
colSizes_gpu.gpudata,
block=(1024,1,1),
grid=(grid_w,1)
)
# Compute the column pointers (the cumulative sum) of the col sizes
colSizes = colSizes_gpu.get()
colPtrs = np.cumsum(np.hstack(([0],colSizes))).astype(np.int32)
colPtrs_gpu = gpuarray.to_gpu(colPtrs)
# Compute the required size of the data and row buffer
bufferSize = int(colPtrs[-1])
log.debug("dS has %d nonzero entries" % bufferSize)
if bufferSize == 0:
log.warning("There are no preceding parents. Potential parent matrix is empty!")
log.debug("Setting buffer size to 1.")
bufferSize = 1
dS_gpu = gpuarray.empty((bufferSize,), dtype=np.float32)
dS_gpu.fill(1.0)
rowIndices_gpu = gpuarray.zeros((bufferSize,), dtype=np.int32)
# Now we can actually fill in row and ds buffers
self.gpuKernels["computeRowIndicesAndDs"](np.int32(G_LOGISTIC_NORMAL),
S1_gpu.gpudata,
np.int32(N2),
S2_gpu.gpudata,
colStart_gpu.gpudata,
colEnd_gpu.gpudata,
colPtrs_gpu.gpudata,
rowIndices_gpu.gpudata,
dS_gpu.gpudata,
block=(1024,1,1),
grid=(grid_w,1)
)
# If this is a spatial dataset then also compute dX
# dX_gpu = None
# if dataSet1.isspatial and dataSet2.isspatial:
# D = dataSet1.D
# assert dataSet2.D == D, "Error: two datasets have different spatial dimensions"
# dX_gpu = gpuarray.empty((D*bufferSize,), dtype=np.float32)
#
# # Copy the spatial data to the GPU
# X1_gpu = gpuarray.to_gpu(dataSet1.X.astype(np.float32))
# X2_gpu = gpuarray.to_gpu(dataSet2.X.astype(np.float32))
#
# self.gpuKernels["computeDx"](np.int32(D),
# np.int32(N1),
# X1_gpu.gpudata,
# np.int32(N2),
# X2_gpu.gpudata,
# rowIndices_gpu.gpudata,
# colPtrs_gpu.gpudata,
# dX_gpu.gpudata,
# block=(1024,1,1),
# grid=(grid_w,1)
# )
ds = dS_gpu.get()
if not np.all(ds > 0):
log.info("Min DS: %f", np.min(ds))
raise Exception("ERROR: DS contains nonpositive entries")
# assert np.all(ds <= self.params["dt_max"]), "ERROR: DS contains entries greater than dt_max!"
# assert np.all(ds < self.params["dt_max"]), "ERROR: DS contains entries equal to dt_max!"
# Update gpuData dictionary
gpuData = {}
gpuData["dS_size"] = bufferSize
gpuData["dS"] = dS_gpu
gpuData["rowIndices"] = rowIndices_gpu
gpuData["colPtrs"] = colPtrs_gpu
# gpuData["dxBuffer_gpu"] = dX_gpu
return gpuData
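# A minimal CPU-side reference (not part of the original file) for the CSC
# layout produced by compute_sparse_spike_intvl_matrix_unknown_procs above:
# column j holds the interval t2 - t1 to every spike in S1 that precedes
# S2[j] by less than dt_max. For illustration and testing only.
def _reference_spike_intvl_matrix(S1, S2, dt_max):
    rows, cols, vals = [], [], []
    for j, t2 in enumerate(S2):
        for i, t1 in enumerate(S1):
            if 0 < t2 - t1 < dt_max:
                rows.append(i)
                cols.append(j)
                vals.append(t2 - t1)
    return sparse.csc_matrix((vals, (rows, cols)), shape=(len(S1), len(S2)))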
| [
"[email protected]"
]
| |
7e8116443903d033a1a47a2ffed807aec258d0c3 | 49e17d736df9889b3a0d91705abd0f3ed579d17c | /quests/Temple_Of_Ikov.py | b4d3f0e615c687daab5b6c89a084be6e2400e914 | []
| no_license | TheWhirl/RunescapeQuestWebsite | 4f258c04a1c1e6bb9f6d9e0fa63fdcab452ccfc2 | 8d5dacbc8251bd1f2dded4ffa04400ed48e0f1fb | refs/heads/master | 2020-05-16T02:54:35.603906 | 2018-12-23T13:03:58 | 2018-12-23T13:03:58 | 182,643,424 | 0 | 0 | null | 2019-04-22T07:22:00 | 2019-04-22T07:21:59 | null | UTF-8 | Python | false | false | 443 | py | import os
import sys
sys.path.insert(0,
os.path.dirname(os.path.realpath(__file__))[
0:-len("quests")])
from QuestInfo import Quest
class Temple_Of_Ikov(Quest):
def __init__(self):
super().__init__("Temple of Ikov")
self.age = 5
self.difficulty = "Experienced"
self.length = "Medium"
self.quest_points = 1
self.thieving = 42
self.ranged = 40
| [
"[email protected]"
]
| |
0dd9e260541d802e91f2058473d8baff323b757c | 60a29068e369900bd1de946bdbc7f9cf61a23127 | /manage.py | c7299db7505f33e3eb96f61001d69f0bc600b78c | []
| no_license | crowdbotics-apps/circuit-web-version-22188 | 4e44be1fb2d6ded8e87f731fd3a2c85e3cfec19e | 7eed4cb920846adf871057b150d0eb72b794a7aa | refs/heads/master | 2023-01-21T08:59:50.677549 | 2020-11-21T22:43:33 | 2020-11-21T22:43:33 | 309,153,421 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'circuit_22188.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
3ab8865d156fd4539ee009f877d33e4d2f16b8ae | ac5e52a3fc52dde58d208746cddabef2e378119e | /exps-sblp-obt/sblp_ut=3.5_rd=1_rw=0.04_rn=4_u=0.075-0.325_p=harmonic-2/sched=RUN_trial=13/params.py | 28bdc367d387d98fbf09079da0322b1eedc608ea | []
| no_license | ricardobtxr/experiment-scripts | 1e2abfcd94fb0ef5a56c5d7dffddfe814752eef1 | 7bcebff7ac2f2822423f211f1162cd017a18babb | refs/heads/master | 2023-04-09T02:37:41.466794 | 2021-04-25T03:27:16 | 2021-04-25T03:27:16 | 358,926,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 248 | py | {'cpus': 4,
'duration': 30,
'final_util': '3.628952',
'max_util': '3.5',
'periods': 'harmonic-2',
'release_master': False,
'res_distr': '1',
'res_nmb': '4',
'res_weight': '0.04',
'scheduler': 'RUN',
'trial': 13,
'utils': 'uni-medium-3'}
| [
"[email protected]"
]
| |
ac78f5706a5fa6ab691f744614ebe243eeb0e6e6 | 81407be1385564308db7193634a2bb050b4f822e | /the-python-standard-library-by-example/SimpleXMLRPCServer/SimpleXMLRPCServer_dotted_name.py | 927f913a51fc70c40159c7b5e56b864de61651e3 | [
"MIT"
]
| permissive | gottaegbert/penter | 6db4f7d82c143af1209b4259ba32145aba7d6bd3 | 8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d | refs/heads/master | 2022-12-30T14:51:45.132819 | 2020-10-09T05:33:23 | 2020-10-09T05:33:23 | 305,266,398 | 0 | 0 | MIT | 2020-10-19T04:56:02 | 2020-10-19T04:53:05 | null | UTF-8 | Python | false | false | 537 | py | #!/usr/bin/env python
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""
__version__ = "$Id$"
#end_pymotw_header
from SimpleXMLRPCServer import SimpleXMLRPCServer
import os
server = SimpleXMLRPCServer(('localhost', 9000), allow_none=True)
server.register_function(os.listdir, 'dir.list')
server.register_function(os.mkdir, 'dir.create')
server.register_function(os.rmdir, 'dir.remove')
try:
print 'Use Control-C to exit'
server.serve_forever()
except KeyboardInterrupt:
    print 'Exiting'
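# A companion client sketch (assumed, not part of this example), showing how
# the dotted names registered above map back to the os functions:
#
#   import xmlrpclib
#   proxy = xmlrpclib.ServerProxy('http://localhost:9000')
#   proxy.dir.create('/tmp/example_dir')   # os.mkdir on the server
#   print proxy.dir.list('/tmp')           # os.listdir
#   proxy.dir.remove('/tmp/example_dir')   # os.rmdir
| [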
"[email protected]"
]
| |
5d09f348af5df16b53230056d4eb3e6758f688c8 | 9d7a1f61e957c6ba688ba9acbd4810bfc41259bd | /crawling/scrapy/section04_03/section04_03/pipelines.py | d6934d13d6491f2787f92d85948d74fd762da68b | []
| no_license | saanghyuk/data_science_python | 17f4c35b9f4d197991fd0c03eecd06487ceaa9a0 | 7dde1ed2a3570edbdd716a43a4a340e64f7e2bb0 | refs/heads/master | 2023-08-24T10:47:13.478635 | 2021-11-05T15:37:33 | 2021-11-05T15:37:33 | 355,115,183 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,113 | py | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
from scrapy.exceptions import DropItem
import csv
import xlsxwriter
class TestSpiderPipeline:
    # initialization method
    def __init__(self):
        # set up the Excel workbook
        self.workbook = xlsxwriter.Workbook("./result_excel.xlsx")
        # set up the CSV writer (switch the open mode between 'a' and 'w' as needed)
        self.file_opener = open("./result_excel.csv", 'w')
        self.csv_writer = csv.DictWriter(self.file_opener, fieldnames = ['rank_num', 'site_name', 'daily_time_site', 'daily_page_view', 'is_pass'])
        # worksheet
        self.worksheet = self.workbook.add_worksheet()
        # number of rows inserted so far
        self.rowcount = 1
    # runs once when the spider starts
def open_spider(self, spider):
spider.logger.info('TestSpider Pipeline Started ')
def process_item(self, item, spider):
if int(item.get('rank_num')) < 41:
item['is_pass'] = True
# 엑셀 저장
self.worksheet.write('A%s' %self.rowcount, item.get('rank_num'))
self.worksheet.write('B%s' %self.rowcount, item.get('site_name'))
self.worksheet.write('C%s' %self.rowcount, item.get('daily_time_site'))
self.worksheet.write('D%s' %self.rowcount, item.get('daily_page_view'))
self.worksheet.write('E%s' %self.rowcount, item.get('is_pass'))
self.rowcount+=1
            # save to CSV
self.csv_writer.writerow(item)
return item
else:
            raise DropItem('Dropped item because its site rank is {}'.format(item.get('rank_num')))
            # print('Sorry, Dropped')
    # runs once when the spider closes
    def close_spider(self, spider):
        # close the Excel file
        self.workbook.close()
        # close the CSV file
        self.file_opener.close()
        # announce shutdown
        spider.logger.info('TestSpider Pipeline Closed')
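# To activate this pipeline, settings.py would typically contain (a sketch;
# the module path mirrors this project layout and may differ):
#
#   ITEM_PIPELINES = {
#       'section04_03.pipelines.TestSpiderPipeline': 300,
#   }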
"[email protected]"
]
| |
1612efa81047e7a20e0dadd4e871ca67fee01b1b | f879be78003d04f5332ea18373ef0de1a17f5817 | /ververica_sdk/models/delete_api_token_response.py | 06f93515c8b1b040224e70273134aed534c4b518 | []
| no_license | justlikemikezz/ververica-sdk | 8228b1d1e9bb9c0530842162f771f7708d1b1555 | b946aa879cc80ad25b8c746b8c2cdc6bde086cbb | refs/heads/master | 2020-12-22T15:58:27.469611 | 2020-01-29T00:33:21 | 2020-01-29T00:33:21 | 236,849,548 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,484 | py | # coding: utf-8
"""
Ververica Platform API
The Ververica Platform APIs, excluding Application Manager. # noqa: E501
OpenAPI spec version: 2.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class DeleteApiTokenResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
}
attribute_map = {
}
def __init__(self): # noqa: E501
"""DeleteApiTokenResponse - a model defined in Swagger""" # noqa: E501
self.discriminator = None
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(DeleteApiTokenResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteApiTokenResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
]
| |
f0128317036c9b966541e24a1e1efe172ad2fce5 | cc5eb8eb50d64ffbca780c42a908053ec549f295 | /python-in-a-day-scripts/ch12 program/script_002.py | 43129ebbb2a9f5b3ad633d6fc7d93d8accaedfbb | []
| no_license | bemagee/LearnPython | 328b1f7a9d5046fe1503aece8a5134a7dd2727d2 | a42565f8fb45f9e2ebbcdcf359ebb9092bf837c2 | refs/heads/master | 2020-12-13T02:45:30.308604 | 2016-10-24T03:09:12 | 2016-10-24T03:09:12 | 10,793,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # Our epic programmer dict from before
epic_programmer_dict = {
'Tim Berners-Lee' : ['[email protected]', 111],
'Guido van Rossum' : ['[email protected]', 222],
'Linus Torvalds': ['[email protected]', 333],
'Larry Page' : ['[email protected]', 444],
'Sergey Brin' : ['[email protected]', 555]
}
print epic_programmer_dict
| [
"[email protected]"
]
| |
411846624c150abad251688c80a09c1cad1dc3a9 | 3dcc6eaef0ca68b230ed61b9fd2bfaf78f8d1c7d | /todo_app/todos/models/__init__.py | 007b0f8bc1c970fe2f9d07ff26b0dd5391d4d216 | []
| no_license | ivo-bass/ToDo-App | a6f92be6ba8dcb266cd9ab58d50bafc44ce3db9f | 0410fe885f729ef85e83a7779a5e971e42f74479 | refs/heads/main | 2023-05-14T13:28:50.219962 | 2021-06-18T13:14:49 | 2021-06-18T13:14:49 | 373,607,487 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from .todo import Todo
from .priority import Priority
from .category import Category
| [
"[email protected]"
]
| |
636fe7f33650c3bd29921d6bf95425a2aeeaef48 | d09fd96bbc931fbb8522e5c991973f064a4ded50 | /baxter/devel/.private/baxter_maintenance_msgs/lib/python2.7/dist-packages/baxter_maintenance_msgs/msg/_UpdateStatus.py | dcdfcbd9e5d9bc1182afd40950d3c1c371b7df12 | []
| no_license | rymonyu/EE4-Robotics | b3827ba0dff5bdfdd1e47fe07a40e955c5226f38 | 6cf9272abd7fe8a074dc74a032f6e0b35edb8548 | refs/heads/master | 2020-08-22T15:09:39.706809 | 2019-12-15T23:35:45 | 2019-12-15T23:35:45 | 216,420,098 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,704 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from baxter_maintenance_msgs/UpdateStatus.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class UpdateStatus(genpy.Message):
_md5sum = "74e246350421569590252c39e8aa7b85"
_type = "baxter_maintenance_msgs/UpdateStatus"
_has_header = False #flag to mark the presence of a Header object
_full_text = """# See the class UpdateRunner()
# status: One-word description of the current action being performed
# long_description: Details pertaining to status if any. Used for verbose error messages.
uint16 status
float32 progress
string long_description
uint16 STS_IDLE = 0
uint16 STS_INVALID = 1
uint16 STS_BUSY = 2
uint16 STS_CANCELLED = 3
uint16 STS_ERR = 4
uint16 STS_MOUNT_UPDATE = 5
uint16 STS_VERIFY_UPDATE = 6
uint16 STS_PREP_STAGING = 7
uint16 STS_MOUNT_STAGING = 8
uint16 STS_EXTRACT_UPDATE = 9
uint16 STS_LOAD_KEXEC = 10
"""
# Pseudo-constants
STS_IDLE = 0
STS_INVALID = 1
STS_BUSY = 2
STS_CANCELLED = 3
STS_ERR = 4
STS_MOUNT_UPDATE = 5
STS_VERIFY_UPDATE = 6
STS_PREP_STAGING = 7
STS_MOUNT_STAGING = 8
STS_EXTRACT_UPDATE = 9
STS_LOAD_KEXEC = 10
__slots__ = ['status','progress','long_description']
_slot_types = ['uint16','float32','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
status,progress,long_description
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(UpdateStatus, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.status is None:
self.status = 0
if self.progress is None:
self.progress = 0.
if self.long_description is None:
self.long_description = ''
else:
self.status = 0
self.progress = 0.
self.long_description = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
_x = self.long_description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.long_description = str[start:end].decode('utf-8')
else:
self.long_description = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_Hf().pack(_x.status, _x.progress))
_x = self.long_description
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 6
(_x.status, _x.progress,) = _get_struct_Hf().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.long_description = str[start:end].decode('utf-8')
else:
self.long_description = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_Hf = None
def _get_struct_Hf():
global _struct_Hf
if _struct_Hf is None:
_struct_Hf = struct.Struct("<Hf")
return _struct_Hf
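# A minimal serialization round-trip sketch (not part of the generated file;
# assumes genpy is installed, as imported above):
if __name__ == '__main__':
    from io import BytesIO
    msg = UpdateStatus(status=UpdateStatus.STS_BUSY, progress=0.5,
                       long_description='installing update')
    buff = BytesIO()
    msg.serialize(buff)
    decoded = UpdateStatus().deserialize(buff.getvalue())
    print(decoded.status, decoded.progress, decoded.long_description)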
| [
"[email protected]"
]
| |
6005b320155e884dcb8bc9c7994fc6807bcf4c35 | aa7c6a9276a859f75b3c5181a92f71d7c19122a5 | /zvt/domain/quotes/stock/stock_1m_kdata.py | f1378710d90001bd962e69feaa23a05bf88f493e | [
"MIT"
]
| permissive | Pengyuyan2/zvt | deef9c5e5bd91c65728ad9bac8c79499707519ee | 9f9c77efcd34c04aaf11b12da0cf483cbe55e297 | refs/heads/master | 2023-07-12T16:55:15.040579 | 2021-08-22T09:41:33 | 2021-08-22T09:55:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 525 | py | # -*- coding: utf-8 -*-
# this file is generated by gen_kdata_schema function, dont't change it
from sqlalchemy.orm import declarative_base
from zvt.contract.register import register_schema
from zvt.domain.quotes import StockKdataCommon
KdataBase = declarative_base()
class Stock1mKdata(KdataBase, StockKdataCommon):
__tablename__ = 'stock_1m_kdata'
register_schema(providers=['joinquant'], db_name='stock_1m_kdata', schema_base=KdataBase, entity_type='stock')
# the __all__ is generated
__all__ = ['Stock1mKdata']
| [
"[email protected]"
]
| |
faac087b45aa6ee29a57ab65290e48b37be927c5 | 03143da0cf99ea92d372feca0954d22d64858d86 | /Approach 4/EMNIST/EMNIST-4/utils/mnistutil.py | a7b7435b0cbd75973bfc88d7ab188e5b1eaa0596 | []
| no_license | rangeetpan/moduleDecompose | ff31732a878e5f9d5e79c3bba9fd9f051c7a5d16 | 508c2a87e00c4e1b616bc29515fc1de2aba55c4e | refs/heads/master | 2021-02-20T08:49:55.157544 | 2020-03-06T04:50:43 | 2020-03-06T04:50:43 | 245,332,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,184 | py | '''
Created on Feb 8, 2019
@author: mislam
'''
from keras.datasets import mnist
from skimage.transform import resize
import numpy as np
from keras import backend as K
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
class MNISTUitl:
def __init__(self):
self.name = None
def load(self,f):
return np.load(f)['arr_0']
def getdata(self,a,b,img_rows = 28, img_cols = 28):
# the data, split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_zo = []
y_zo = []
for i in range(len(y_train)):
if y_train[i] == a or y_train[i] == b:
A = resize(x_train[i], (img_rows, img_cols),mode='constant')
Ay = y_train[i]#resize(y_train[i], (img_rows, img_cols))
x_zo.append(A)
y_zo.append(Ay)
xt_zo = []
yt_zo = []
for i in range(len(y_test)):
if y_test[i] == a or y_test[i] == b:
A = resize(x_test[i], (img_rows, img_cols),mode='constant')
Ay = y_test[i]#resize(y_train[i], (img_rows, img_cols))
xt_zo.append(A)
yt_zo.append(Ay)
x_zo = np.array(x_zo)
y_zo = np.array(y_zo)
xt_zo = np.array(xt_zo)
yt_zo = np.array(yt_zo)
return x_zo, y_zo, xt_zo, yt_zo
    def getdata2(self,a,b,img_rows = 28, img_cols = 28):
        # a and b are kept for interface parity with getdata(); this variant
        # loads all ten digit classes from the EMNIST npz files
# the data, split between train and test sets
x_train = self.load('emnist-train-imgs.npz')
x_test = self.load('emnist-test-imgs.npz')
y_train = self.load('emnist-train-labels.npz')
for i in range(0,len(y_train)):
y_train[i]=y_train[i]-1
y_test = self.load('emnist-test-labels.npz')
for i in range(0,len(y_test)):
y_test[i]=y_test[i]-1
x_zo = []
y_zo = []
for i in range(len(y_train)):
if y_train[i] in [0,1,2,3,4,5,6,7,8,9]:
A = resize(x_train[i], (img_rows, img_cols),mode='constant')
Ay = y_train[i]#resize(y_train[i], (img_rows, img_cols))
x_zo.append(A)
y_zo.append(Ay)
xt_zo = []
yt_zo = []
for i in range(len(y_test)):
if y_test[i] in [0,1,2,3,4,5,6,7,8,9]:
A = resize(x_test[i], (img_rows, img_cols),mode='constant')
Ay = y_test[i]#resize(y_train[i], (img_rows, img_cols))
xt_zo.append(A)
yt_zo.append(Ay)
x_zo = np.array(x_zo)
y_zo = np.array(y_zo)
xt_zo = np.array(xt_zo)
yt_zo = np.array(yt_zo)
return x_zo, y_zo, xt_zo, yt_zo
def train(self,x_zo,y_zo,xt_zo,yt_zo,img_rows = 28, img_cols = 28,numclass = 2):
if K.image_data_format() == 'channels_first':
x_zo = x_zo.reshape(x_zo.shape[0], 1, img_rows, img_cols)
xt_zo = xt_zo.reshape(xt_zo.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_zo.astype('float32')
x_test = xt_zo.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_zo.shape,x_train.shape[0], 'train samples', y_zo.shape)
print(x_test.shape[0], 'test samples')
y_train = y_zo#keras.utils.to_categorical(y_zo, numclass )
y_test = yt_zo#keras.utils.to_categorical(yt_zo, numclass)
print(y_zo.shape,y_train.shape)
nm = keras.Sequential([
keras.layers.Flatten(input_shape=(img_rows, img_cols,1), name = "Input"),
keras.layers.Dense(7, activation=tf.nn.relu ,name = "H"),
keras.layers.Dense(numclass, activation=tf.nn.softmax, name = "output")
])
nm.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
nm.fit(x_train, y_train, epochs=10)
return nm, x_test, y_test
def train2(self,x_zo,y_zo,xt_zo,yt_zo,img_rows = 28, img_cols = 28,numclass = 10,ep = 20):
if K.image_data_format() == 'channels_first':
x_zo = x_zo.reshape(x_zo.shape[0], 1, img_rows, img_cols)
xt_zo = xt_zo.reshape(xt_zo.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_zo.astype('float32')
x_test = xt_zo.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_zo.shape,x_train.shape[0], 'train samples', y_zo.shape)
print(x_test.shape[0], 'test samples')
y_train = y_zo #keras.utils.to_categorical(y_zo, numclass )
y_test = yt_zo #keras.utils.to_categorical(yt_zo, numclass)
print(y_zo.shape,y_train.shape)
nm = keras.Sequential([
keras.layers.Flatten(input_shape=(img_rows, img_cols,1), name = "Input"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H"),
keras.layers.Dense(numclass, activation=tf.nn.softmax, name = "output")
])
nm.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(nm.summary())
nm.fit(x_train, y_train, epochs=ep)
return nm, x_test, y_test
def trainDense2(self,x_zo,y_zo,xt_zo,yt_zo,img_rows = 28, img_cols = 28,numclass = 10,ep = 20):
if K.image_data_format() == 'channels_first':
x_zo = x_zo.reshape(x_zo.shape[0], 1, img_rows, img_cols)
xt_zo = xt_zo.reshape(xt_zo.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_zo.astype('float32')
x_test = xt_zo.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_zo.shape,x_train.shape[0], 'train samples', y_zo.shape)
print(x_test.shape[0], 'test samples')
y_train = y_zo #keras.utils.to_categorical(y_zo, numclass )
y_test = yt_zo #keras.utils.to_categorical(yt_zo, numclass)
print(y_zo.shape,y_train.shape)
nm = keras.Sequential([
keras.layers.Flatten(input_shape=(img_rows, img_cols,1), name = "Input"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H1"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H2"),
keras.layers.Dense(numclass, activation=tf.nn.softmax, name = "output")
])
nm.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(nm.summary())
nm.fit(x_train, y_train, epochs=ep)
return nm, x_test, y_test
def trainDense4(self,x_zo,y_zo,xt_zo,yt_zo,img_rows = 28, img_cols = 28,numclass = 10,ep = 20):
if K.image_data_format() == 'channels_first':
x_zo = x_zo.reshape(x_zo.shape[0], 1, img_rows, img_cols)
xt_zo = xt_zo.reshape(xt_zo.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_zo.astype('float32')
x_test = xt_zo.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_zo.shape,x_train.shape[0], 'train samples', y_zo.shape)
print(x_test.shape[0], 'test samples')
y_train = y_zo #keras.utils.to_categorical(y_zo, numclass )
y_test = yt_zo #keras.utils.to_categorical(yt_zo, numclass)
print(y_zo.shape,y_train.shape)
nm = keras.Sequential([
keras.layers.Flatten(input_shape=(img_rows, img_cols,1), name = "Input"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H1"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H2"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H3"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H4"),
keras.layers.Dense(numclass, activation=tf.nn.softmax, name = "output")
])
nm.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(nm.summary())
nm.fit(x_train, y_train, epochs=ep)
return nm, x_test, y_test
def trainDense6(self,x_zo,y_zo,xt_zo,yt_zo,img_rows = 28, img_cols = 28,numclass = 10,ep = 20):
if K.image_data_format() == 'channels_first':
x_zo = x_zo.reshape(x_zo.shape[0], 1, img_rows, img_cols)
xt_zo = xt_zo.reshape(xt_zo.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_zo.astype('float32')
x_test = xt_zo.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_zo.shape,x_train.shape[0], 'train samples', y_zo.shape)
print(x_test.shape[0], 'test samples')
y_train = y_zo #keras.utils.to_categorical(y_zo, numclass )
y_test = yt_zo #keras.utils.to_categorical(yt_zo, numclass)
print(y_zo.shape,y_train.shape)
nm = keras.Sequential([
keras.layers.Flatten(input_shape=(img_rows, img_cols,1), name = "Input"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H1"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H2"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H3"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H4"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H5"),
keras.layers.Dense(49, activation=tf.nn.relu ,name = "H6"),
keras.layers.Dense(numclass, activation=tf.nn.softmax, name = "output")
])
nm.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print(nm.summary())
nm.fit(x_train, y_train, epochs=ep)
return nm, x_test, y_test
def trainData(self,x_zo,y_zo,xt_zo,yt_zo,img_rows = 28, img_cols = 28,numclass = 10,ep = 20):
if K.image_data_format() == 'channels_first':
x_zo = x_zo.reshape(x_zo.shape[0], 1, img_rows, img_cols)
xt_zo = xt_zo.reshape(xt_zo.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_zo.astype('float32')
x_test = xt_zo.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_zo.shape,x_train.shape[0], 'train samples', y_zo.shape)
print(x_test.shape[0], 'test samples')
y_train = y_zo #keras.utils.to_categorical(y_zo, numclass )
y_test = yt_zo #keras.utils.to_categorical(yt_zo, numclass)
print(y_zo.shape,y_train.shape)
# nm = keras.Sequential([
# keras.layers.Flatten(input_shape=(img_rows, img_cols,1), name = "Input"),
# keras.layers.Dense(49, activation=tf.nn.relu ,name = "H"),
# keras.layers.Dense(numclass, activation=tf.nn.softmax, name = "output")
# ])
# nm.compile(optimizer='adam',
# loss='sparse_categorical_crossentropy',
# metrics=['accuracy'])
# print(nm.summary())
# nm.fit(x_train, y_train, epochs=ep)
return x_test, y_test,x_train, y_train
def train3(self,x_zo,y_zo,xt_zo,yt_zo,img_rows = 28, img_cols = 28,numclass = 10,ep = 20):
input_shape = (img_rows,img_cols,1)
x_zo = x_zo.reshape(x_zo.shape[0], img_rows, img_cols, 1)
xt_zo = xt_zo.reshape(xt_zo.shape[0], img_rows, img_cols, 1)
x_train = x_zo.astype('float32')
x_test = xt_zo.astype('float32')
x_train /= 255
x_test /= 255
y_train = keras.utils.to_categorical(y_zo, numclass )
y_test = keras.utils.to_categorical(yt_zo, numclass)
num_classes = 10
model = Sequential()
model.add(Conv2D(32, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
#model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=ep)
return model, x_test, y_test | [
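# A minimal usage sketch (not part of the original file): load the MNIST
# digits 0 and 1 via getdata() and fit the small dense classifier.
if __name__ == '__main__':
    util = MNISTUitl()
    x_tr, y_tr, x_te, y_te = util.getdata(0, 1)
    model, x_test, y_test = util.train(x_tr, y_tr, x_te, y_te, numclass=2)
    print(model.evaluate(x_test, y_test))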
"[email protected]"
]
| |
86118937a3c5da7d22eb06c3ed34e49f7cfa2f11 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2711/47774/305852.py | a8c9c3680c535404ce3caf423c50014ec1f95130 | []
| no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,535 | py | def isSimilar(s1, s2):
diff, l = 0, len(s1)
for i in range(l):
if (s1[i] != s2[i]):
diff += 1
if (diff > 2):
return False
return True
def find(f, x):
return f[x] if x == f[x] else find(f, f[x])
def merge(f, x, y):
rx = find(f, f[x])
ry = find(f, f[y])
f[ry] = rx
def solve(A):
A = list(set(A))
l,w = len(A), len(A[0])
res = 0
f = [i for i in range(l)]
if l <= w*w:
for i in range(l):
for j in range(i + 1, l):
if (find(f, i) != find(f,j)):
isS = isSimilar(A[i], A[j])
if (isS):
merge(f, i, j)
else:
dict = {}
for i in range(l):
if (A[i] in dict):
dict[A[i]].add(i)
else:
dict[A[i]] = {i}
word = list(A[i])
for i0 in range(w):
for j0 in range(i0+1, w):
if (word[i0] != word[j0]):
word[i0],word[j0] = word[j0],word[i0]
neighbor = ''.join(word)
if (neighbor in dict):
dict[neighbor].add(i)
else:
dict[neighbor] = {i}
word[i0],word[j0] = word[j0],word[i0]
for i in range(l):
for j in dict[A[i]]:
merge(f,i,j)
for i in range(l):
if (i == f[i]):
res += 1
return res
s=eval(input())
print(solve(s))
| [
"[email protected]"
]
| |
5245bc11bfacf34d092a6630efd1e6ec7b5948a9 | 32809f6f425bf5665fc19de2bc929bacc3eeb469 | /src/1096-Brace-Expansion-II/1096.py | 78067156acba02fd1f032327859403cee51255d5 | []
| no_license | luliyucoordinate/Leetcode | 9f6bf01f79aa680e2dff11e73e4d10993467f113 | bcc04d49969654cb44f79218a7ef2fd5c1e5449a | refs/heads/master | 2023-05-25T04:58:45.046772 | 2023-05-24T11:57:20 | 2023-05-24T11:57:20 | 132,753,892 | 1,575 | 569 | null | 2023-05-24T11:57:22 | 2018-05-09T12:30:59 | C++ | UTF-8 | Python | false | false | 723 | py | import itertools
class Solution:
def braceExpansionII(self, expression):
groups = [[]]
level = 0
for i, c in enumerate(expression):
if c == '{':
if level == 0:
start = i+1
level += 1
elif c == '}':
level -= 1
if level == 0:
groups[-1].append(self.braceExpansionII(expression[start:i]))
elif level == 0:
if c == ",":
groups.append([])
else:
groups[-1].append([c])
        return sorted(set().union(*[set(map(''.join, itertools.product(*group))) for group in groups]))
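# A quick illustrative check (not part of the original submission):
if __name__ == "__main__":
    print(Solution().braceExpansionII("{a,b}{c,{d,e}}"))
    # expected: ['ac', 'ad', 'ae', 'bc', 'bd', 'be']
| [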
"[email protected]"
]
| |
6b19da70918b7711aee9f2fda10eb6fbec50ba0d | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/126/usersdata/191/29517/submittedfiles/ap2.py | c8f2da701341911eecf630c83018954555844586 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | # -*- coding: utf-8 -*-
a=float(input('digite a:'))
b=float(input('digite b:'))
c=float(input('digite c:'))
d=float(input('digite d:'))
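# prints the largest of the four inputs, then the smallest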
if a>=b and a>=c and a>=d:
print(a)
elif b>=a and b>=c and b>=d:
print(b)
elif c>=a and c>=b and c>=d:
print(c)
else:
print(d)
if a<=b and a<=c and a<=d:
print(a)
elif b<=a and b<=c and b<=d:
print(b)
elif c<=a and c<=b and c<=d:
print(c)
else:
print(d)
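# equivalent built-in sketch: print(max(a, b, c, d)); print(min(a, b, c, d))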
| [
"[email protected]"
]
| |
69e96d91f1e97b1e4777741ed5926f0e3ffe5d96 | d37ab0fa7dd0026425fc15a13288847ae0954f48 | /src/helixweb/billing/forms_filters.py | dd3d23578a55a833025b34264b3fabe186615716 | []
| no_license | sand8080/helixweb | 4fd84e3df8add42996684a288c16148f8582297b | 5f08b4cc41d6bd72f54382ebe5e9b45c428fac4b | refs/heads/master | 2020-12-24T15:23:16.944216 | 2014-02-17T10:56:45 | 2014-02-17T10:56:45 | 1,048,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,846 | py | from django import forms
from django.utils.translation import ugettext_lazy as _
from helixweb.core.widgets import ConstInput
from helixweb.core.forms_filters import (FilterForm, AbstractFilterActionLogsForm,
AbstractFilterAllActionLogsForm, AbstractFilterSelfActionLogsForm,
AbstractFilterUserActionLogsForm)
from helixweb.billing.forms import BillingForm
class FilterBillingForm(FilterForm, BillingForm):
pass
class AbstractBillingFilterActionLogsForm(AbstractFilterActionLogsForm, FilterBillingForm):
action = 'get_action_logs'
def __init__(self, *args, **kwargs):
kwargs['choices'] = (('', ''),
('add_balance', _('add balance')),
('modify_balance', _('modify balance')),
('add_receipt', _('add receipt')),
('add_bounus', _('add bonus')),
('lock', _('lock')),
('unlock', _('unlock')),
('charge_off', _('charge off')),
('modify_used_currencies', _('modify currencies')),
)
super(AbstractBillingFilterActionLogsForm, self).__init__(*args, **kwargs)
class FilterAllActionLogsForm(AbstractBillingFilterActionLogsForm, AbstractFilterAllActionLogsForm):
pass
class FilterSelfActionLogsForm(AbstractBillingFilterActionLogsForm, AbstractFilterSelfActionLogsForm):
pass
class FilterUserActionLogsForm(AbstractBillingFilterActionLogsForm, AbstractFilterUserActionLogsForm):
pass
class FilterCurrenciesForm(FilterBillingForm):
action = 'get_currencies'
ordering_param = '-code'
class FilterUsedCurrenciesForm(FilterBillingForm):
action = 'get_used_currencies'
ordering_param = '-code'
class FilterBalanceForm(FilterBillingForm):
action = 'get_balances'
def __init__(self, *args, **kwargs):
currencies = kwargs.pop('currencies', [])
super(FilterBalanceForm, self).__init__(*args, **kwargs)
self.fields['id'] = forms.IntegerField(label=_('balance id'), required=False)
self.fields['user_id'] = forms.IntegerField(label=_('user id'), required=False)
self.fields['currency_code'] = self._gen_currency_code(currencies, required=False)
self.fields['from_real_amount'] = forms.DecimalField(label=_('real amount from'),
required=False)
self.fields['to_real_amount'] = forms.DecimalField(label=_('real amount to'),
required=False)
self.fields['from_virtual_amount'] = forms.DecimalField(label=_('virtual amount from'),
required=False)
self.fields['to_virtual_amount'] = forms.DecimalField(label=_('virtual amount to'),
required=False)
self.fields['from_overdraft_limit'] = forms.DecimalField(label=_('overdraft limit from'),
required=False)
self.fields['to_overdraft_limit'] = forms.DecimalField(label=_('overdraft limit to'),
required=False)
self.fields['from_locked_amount'] = forms.DecimalField(label=_('locked amount from'),
required=False)
self.fields['to_locked_amount'] = forms.DecimalField(label=_('locked amount to'),
required=False)
self.fields['is_active'] = forms.ChoiceField(label=_('is active'), required=False, widget=forms.widgets.RadioSelect(),
choices=(('all', _('all')), ('1', _('active')), ('0', _('inactive'))),
initial='all')
def as_helix_request(self):
d = super(FilterBalanceForm, self).as_helix_request()
self._strip_filter_param(d, 'id')
self._strip_filter_param(d, 'user_id')
self._strip_filter_param(d, 'currency_code')
self._strip_filter_param(d, 'from_real_amount')
self._strip_filter_param(d, 'to_real_amount')
self._strip_filter_param(d, 'from_virtual_amount')
self._strip_filter_param(d, 'to_virtual_amount')
self._strip_filter_param(d, 'from_overdraft_limit')
self._strip_filter_param(d, 'to_overdraft_limit')
self._strip_filter_param(d, 'from_locked_amount')
self._strip_filter_param(d, 'to_locked_amount')
if (not d['filter_params']['is_active'] or
d['filter_params']['is_active'] == 'all'):
d['filter_params'].pop('is_active')
else:
val = bool(int(d['filter_params']['is_active']))
d['filter_params']['is_active'] = val
return d
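# A submitted FilterBalanceForm serializes to a helix request shaped roughly
# like the following (an assumed example; the request envelope comes from the
# base FilterForm/BillingForm classes, which are not shown here):
#   {'action': 'get_balances',
#    'filter_params': {'currency_code': 'USD', 'is_active': True}}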
class AbstractFilterLocksForm(FilterBillingForm):
action = 'get_locks'
def _add_common_fields(self):
self.fields['order_id'] = forms.CharField(label=_('order id'),
max_length=64, required=False)
self.fields['from_creation_date'] = forms.DateField(label=_('from'), required=False)
self.fields['to_creation_date'] = forms.DateField(label=_('to'), required=False)
def as_helix_request(self):
d = super(AbstractFilterLocksForm, self).as_helix_request()
self._strip_filter_param(d, 'user_id')
self._strip_filter_param(d, 'order_id')
self._strip_filter_param(d, 'balance_id')
self._strip_from_date_param(d, 'from_creation_date')
self._strip_to_date_param(d, 'to_creation_date')
return d
class FilterLocksForm(AbstractFilterLocksForm):
def __init__(self, *args, **kwargs):
super(FilterLocksForm, self).__init__(*args, **kwargs)
self.fields['user_id'] = forms.IntegerField(label=_('user id'),
required=False)
self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
required=False)
self._add_common_fields()
class FilterUserBalanceLocksForm(AbstractFilterLocksForm):
def __init__(self, *args, **kwargs):
super(FilterUserBalanceLocksForm, self).__init__(*args, **kwargs)
self.fields['user_id'] = forms.IntegerField(label=_('user id'),
widget=ConstInput, required=False)
self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
widget=ConstInput, required=False)
self._add_common_fields()
class FilterSelfLocksForm(AbstractFilterLocksForm):
action = 'get_locks_self'
def __init__(self, *args, **kwargs):
super(FilterSelfLocksForm, self).__init__(*args, **kwargs)
self._add_common_fields()
class AbstractFilterTransactionsForm(FilterBillingForm):
action = 'get_transactions'
def _add_common_fields(self):
self.fields['order_id'] = forms.CharField(label=_('order id'),
max_length=64, required=False)
self.fields['type'] = forms.ChoiceField(label=_('type'), required=False,
widget=forms.widgets.Select(),
choices=((None, _('all')), ('receipt', _('receipt')), ('bonus', _('bonus')),
('lock', _('lock')), ('unlock', _('unlock')), ('charge_off', _('charge off'))),
initial='all')
self.fields['from_creation_date'] = forms.DateField(label=_('from'), required=False)
self.fields['to_creation_date'] = forms.DateField(label=_('to'), required=False)
def as_helix_request(self):
d = super(AbstractFilterTransactionsForm, self).as_helix_request()
self._strip_filter_param(d, 'id')
self._strip_filter_param(d, 'user_id')
self._strip_filter_param(d, 'order_id')
self._strip_filter_param(d, 'type')
self._strip_filter_param(d, 'balance_id')
self._strip_from_date_param(d, 'from_creation_date')
self._strip_to_date_param(d, 'to_creation_date')
return d
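# The three concrete transaction filters below expose the same fields and
# differ mainly in which identifiers stay editable: the admin variant edits
# user_id/balance_id freely, the per-user variant pins them with read-only
# ConstInput widgets, and the self variant targets the separate
# 'get_transactions_self' action.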
class FilterTransactionsForm(AbstractFilterTransactionsForm):
def __init__(self, *args, **kwargs):
super(FilterTransactionsForm, self).__init__(*args, **kwargs)
self.fields['user_id'] = forms.IntegerField(label=_('user id'),
required=False)
self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
required=False)
self.fields['id'] = forms.IntegerField(label=_('id'),
required=False)
self._add_common_fields()
class FilterUserTransactionsForm(AbstractFilterTransactionsForm):
def __init__(self, *args, **kwargs):
super(FilterUserTransactionsForm, self).__init__(*args, **kwargs)
self.fields['user_id'] = forms.IntegerField(label=_('user id'),
widget=ConstInput, required=False)
self.fields['balance_id'] = forms.IntegerField(label=_('balance id'),
widget=ConstInput, required=False)
self.fields['id'] = forms.IntegerField(label=_('id'),
required=False)
self._add_common_fields()
class FilterSelfTransactionsForm(AbstractFilterTransactionsForm):
action = 'get_transactions_self'
def __init__(self, *args, **kwargs):
super(FilterSelfTransactionsForm, self).__init__(*args, **kwargs)
self.fields['id'] = forms.IntegerField(label=_('id'),
required=False)
self._add_common_fields() | [
"[email protected]"
]
| |
1c3d00acafd76a610342ab1ef712ad250ee8870c | b2bdd5997ac84b0e19071c1ddc1c1a4d2f4fab58 | /catkin_ws/devel/.private/p2/lib/python2.7/dist-packages/p2/msg/_Ackermann.py | 0dff4e208b8c08e4de290b065cd192a52bee173e | []
| no_license | hbtslys01/RosCodingProject | 860d18531dabe4a969278deff5dbad8a8703ea83 | 226feda08724e92fd94191e123b9442c028283dd | refs/heads/master | 2020-04-11T09:16:17.808626 | 2018-12-13T17:30:08 | 2018-12-13T17:30:08 | 161,671,560 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,768 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from p2/Ackermann.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class Ackermann(genpy.Message):
_md5sum = "61c7e29a36f91d9c196a9722234d7472"
_type = "p2/Ackermann"
_has_header = False #flag to mark the presence of a Header object
_full_text = """float64 steering_angle
float64 vel
"""
__slots__ = ['steering_angle','vel']
_slot_types = ['float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
steering_angle,vel
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Ackermann, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.steering_angle is None:
self.steering_angle = 0.
if self.vel is None:
self.vel = 0.
else:
self.steering_angle = 0.
self.vel = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.steering_angle, _x.vel))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.steering_angle, _x.vel,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_2d().pack(_x.steering_angle, _x.vel))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
end = 0
_x = self
start = end
end += 16
(_x.steering_angle, _x.vel,) = _get_struct_2d().unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_2d = None
def _get_struct_2d():
global _struct_2d
if _struct_2d is None:
_struct_2d = struct.Struct("<2d")
return _struct_2d
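# Hypothetical round trip through the generated API:
#   from io import BytesIO
#   msg = Ackermann(steering_angle=0.2, vel=1.5)
#   buf = BytesIO(); msg.serialize(buf)
#   out = Ackermann(); out.deserialize(buf.getvalue())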
| [
"[email protected]"
]
| |
a14e1188bdfc65d5b6c7835a865851cf2b468dce | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/srt.py | fe6f0681dbf56a25b61e42d88d931b804baf7678 | []
| no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 82 | py | ii = [('ShawHDE.py', 1), ('AubePRP.py', 1), ('FerrSDO2.py', 1), ('ClarGE3.py', 1)] | [
"[email protected]"
]
| |
2159710ed19f6e6b65f4a46e2509a0dbadb81e3b | 119c716206804aef3eb99c5ca24e8e16eed3473a | /openaddr/tests/coverage.py | 8693f0645fb3ed0e3777f5f06456c426eb04111b | [
"ISC"
]
| permissive | enterstudio/machine-1 | d190db7cd8fceb409e25232b65507ec21c1b7009 | 43d4d3b41e1ad3410c2442b8220e6e1a9fe2255f | refs/heads/master | 2021-08-30T08:06:05.055323 | 2017-12-16T23:30:56 | 2017-12-16T23:30:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,950 | py | import unittest
import unittest.mock
import os
import psycopg2
from httmock import HTTMock, response
DATABASE_URL = os.environ.get('DATABASE_URL', 'postgres:///hooked_on_sources')
from ..ci import recreate_db
from ..ci.coverage import calculate
class TestCalculate (unittest.TestCase):
def setUp(self):
'''
'''
recreate_db.recreate(DATABASE_URL)
with psycopg2.connect(DATABASE_URL) as conn:
with conn.cursor() as db:
db.execute("insert into cb_2013_us_state_20m (gid, name, usps_code, geom) values (1, 'Kansas', 'KS', ST_SetSRID('MULTIPOLYGON(((-102.0472 40.0033, -94.6143 40.0033, -94.6143 36.9985, -102.0472 36.9985, -102.0472 40.0033)))'::geometry, 4326))")
db.execute("insert into ne_50m_admin_0_countries (gid, name, name_long, iso_a2, iso_a3, geom) values (1, 'Null Is.', 'Null Island', 'XX', 'XXX', '0106000020E610000002000000010300000001000000270000008EB1135E82533FBF691D554D1075EF3E90BE49D3A0683EBF664CC11A67D3F13EA208A9DBD9573EBF3FABCC94D6DFE23EEC3026FDBD143EBF8DEDB5A0F7C6E03E3659A31EA2D13DBF664CC11A67D3F13E13D21A834E083DBF54E41071732AE93EDCF3FC69A33A3DBF8DEDB5A0F7C6D03E0188BB7A15193DBF0000000000000000CCB6D3D688603CBF54E41071732AC93E0395F1EF332E3CBFF168E388B5F8E4BE7044F7AC6BB43CBF05A227655243EBBE7044F7AC6BB43CBF8DEDB5A0F7C6F0BE2829B000A60C3CBF664CC11A67D3F1BE16DF50F86C1D3CBF180AD80E46EC03BF7044F7AC6BB43CBF180AD80E46EC03BFA72215C616823CBF3677F4BF5C8B06BFEE3D5C72DC293DBF05A2276552430BBFA515DF50F86C3DBFC093162EABB009BF3659A31EA2D13DBF2D431CEBE2360ABF3659A31EA2D13DBF240F441669E20DBF344C6DA983BC3EBF240F441669E20DBF344C6DA983BC3EBFC093162EABB009BFD7D9907F66103FBFC093162EABB009BF691D554D10753FBF4BB0389CF9D50CBF61C1FD80070640BFE7340BB43BA408BFD5CC5A0A48FB3FBFA226FA7C941107BF213EB0E3BF4040BF664CC11A67D301BF4F779E78CE1640BFB75F3E59315CFDBE4F779E78CE1640BF05A227655243FBBEFCA9F1D24D6240BF54E41071732AF9BE213EB0E3BF4040BFF168E388B5F8E4BE46D26EF4311F40BFF168E388B5F8E4BE46D26EF4311F40BF54E41071732AD9BE2AE3DF675C3840BF00000000000000801FF5D72B2CB83FBF3FABCC94D6DFE23E56D3F544D7853FBF54E41071732AD93EA0FB7266BB423FBFB75F3E59315CDD3EA0FB7266BB423FBFA226FA7C9411E73E8EB1135E82533FBF691D554D1075EF3E010300000003000000C7010000C9C7EE022505163F7BA35698BED7303F2829B000A60C1C3F7689EAAD81AD323F1FF5D72B2CB81F3F41B8020AF5F4313F86C613419C87233F0ADAE4F04927323FA019C40776FC273FD2FBC6D79E59323F7D923B6C2233273F7689EAAD81AD323F9E0C8E9257E7283F613255302AA9333FE527D53E1D8F293FAB5AD2510E66333F99F221A81ABD2A3F3D9E961FB8CA333F72512D228AC92B3F2A5437177FDB333F261C7A8B87F72C3F3D9E961FB8CA333F240F441669E22D3FE02BBAF59A1E343FFA60191BBAD92F3FF5824F73F222333F8EB1135E82532F3F3FABCC94D6DF323FFA60191BBAD92F3FF78F85E81038323FFA60191BBAD92F3FE7525C55F65D313F0EF450DB8651303F1F317A6EA12B313FB345D26EF4312F3FD5CC5A0A48FB2F3F6B2A8BC22E8A2E3F46D26EF4311F303F92CB7F48BF7D2D3F4489963C9E962F3FBC79AA436E862B3F691D554D10752F3FE31A9FC9FE792A3F1FF5D72B2CB82F3FC093162EABB0293FD7D9907F66102F3FA019C40776FC273F33880FECF82F303F315D88D51F61283FE95F92CA1473303F6859F78F85E8203F46D26EF4311F303F664CC11A67D3213F581CCEFC6A0E303FF5824F73F222233F6B2A8BC22E8A2E3F3FABCC94D6DF223F48A30227DBC02D3FAB5AD2510E66233F92CB7F48BF7D2D3F3D9E961FB8CA233F95D8B5BDDD922C3FA94D9CDCEF50243F95D8B5BDDD922C3FF168E388B5F8243FDCF3FC69A33A2D3FEC4E779E78CE263FB96C74CE4F712C3FEA4141295AB9273F261C7A8B87F72C3F56F146E6913F283F261C7A8B87F72C3F0CC9C9C4AD82283FDE0033DFC14F2C3F97E5EB32FCA72B3FB96C74CE4F712C3FDE0033DFC14F2C3F4DBD6E1118EB2B3F240F441669E22D3FBE86E0B88C9B2A3F8EB1135E82532F3FBE86E0B88C9B2A3F213EB0E3BF40303F52D7DAFB54152A3F6859F78F85E8303F52D7DAFB54152A3F560F98874CF9303FC093162EABB0293F6859F78F85E8303F9E0C8E9257E7283F317BD976DA1A313F56F146E6913F283F1F317A6EA12B313F13F06B2409C2253F6859F78F85E8303F3A9160AA99B5243F8EB1135E82532F3F3A9160AA99B5243FB345D26EF4312F3F3FABCC94D6DF223F1FF5D72B2CB82F3FAE6708C72C7B223F46D26EF4311F303F1A170E846401233FE95F92CA1473303F3FABCC94D6DF223FC5CBD3B9A294303FF78F85E81038223F7BA35698BED7303FAE6708C72C7B223F1F317A6EA12B313FF78F85E81038223F6859F78F85E8303FFA9CBB5D2F4D213F560F98874CF9303FB28174B169A5203FE7525C55F65D313FFCA9F1D24D62203F8BE07F2BD9B1313F1FF5D72B2CB81F3F41B8020AF5F4313F213EB0E3BF40203FC0B167CF656A323FFCA9F1D24D62203F7689EAAD81AD323F691D554D10751F3FF5824F73F222333F691D554D10751F3FAB5AD2510E66333F90BE49D3A0681E3FAB5
AD2510E66333F2829B000A60C1C3F4FE8F527F1B9333F2829B000A60C1C3F180AD80E46EC333F4BB0389CF9D51C3FBB97FBE42840343F4BB0389CF9D51C3F4DDBBFB2D2A4343F4FCAA48636001B3F3A9160AA99B5343F9E0C8E9257E7183FB98AC56F0A2B353FC5AD8218E8DA173F5D18E945ED7E353FE7340BB43BA4183F01A60C1CD0D2353FC5AD8218E8DA173FB77D8FFAEB15363F3677F4BF5C8B163F3677F4BF5C8B363F5D18E945ED7E153FEC4E779E78CE363F5D18E945ED7E153FC7BAB88D06F0363F3A9160AA99B5143FB5705985CD00373FAB5AD2510E66133F58FE7C5BB054373FF5824F73F222133F58FE7C5BB054373FCEE15AED612F143F21205F420587373FCEE15AED612F143FD7F7E12021CA373F3FABCC94D6DF123FD7F7E12021CA373FD2FBC6D79E59123F1F1329CDE671383FD2FBC6D79E59123FB056ED9A90D6383F664CC11A67D3113FB056ED9A90D6383F8DEDB5A0F7C6103F693BA6EECA2E383F05A2276552430B3F315D88D51F61383FC093162EABB0093F315D88D51F61383F3677F4BF5C8B063F7B8505F7031E383F180AD80E46EC033FD7F7E12021CA373F664CC11A67D3F13E6B48DC63E943373F180AD80E46ECF33EFE98D6A6B1BD363F8DEDB5A0F7C6E03EDC114E0B5EF4353F691D554D1075EF3E263ACB2C42B1353F54E41071732AE93E263ACB2C42B1353F54E41071732AD93E94F6065F984C353F54E41071732AD93E4ACE893DB48F353F8DEDB5A0F7C6E0BECCD42478431A353FA226FA7C9411E7BECCD42478431A353F180AD80E46ECF3BE15FDA19927D7343FF168E388B5F8F4BE15FDA19927D7343FB75F3E59315CFDBE84B9DDCB7D72343F90BE49D3A068FEBEF37519FED30D343F54E41071732AF9BE2A5437177FDB333F2D431CEBE236FABE05C078060DFD333FAB5AD2510E6603BF99107349D576333F180AD80E46EC03BF99107349D576333FA226FA7C941107BF7689EAAD81AD323FA226FA7C941107BF7689EAAD81AD323F54E41071732A09BFE54526E0D748323FC093162EABB009BF1C2444F98216323F4BB0389CF9D50CBFF78F85E81038323F691D554D10750FBFF78F85E81038323FB0743E3C4B9011BFC0B167CF656A323FFA9CBB5D2F4D11BF7689EAAD81AD323FD2FBC6D79E5912BFBEA4315A4755333F8DEDB5A0F7C610BF86C613419C87333F89D349B6BA9C12BF180AD80E46EC333FD2FBC6D79E5912BFF37519FED30D343FAB5AD2510E6613BF180AD80E46EC333F3A9160AA99B514BFCEE15AED612F343F3A9160AA99B514BFA94D9CDCEF50343F315D88D51F6118BF05C078060DFD333FE7340BB43BA418BF05C078060DFD333F766B990CC7F319BF86C613419C87333FBC79AA436E861BBF86C613419C87333F4BB0389CF9D51CBF613255302AA9333F4BB0389CF9D51CBF180AD80E46EC333F72512D228AC91BBFA94D9CDCEF50343F95D8B5BDDD921CBFF37519FED30D343F90BE49D3A0681EBFA94D9CDCEF50343FFCA9F1D24D6220BF97033DD4B661343F6859F78F85E820BFBB97FBE42840343FD508FD4CBD6E21BF4DDBBFB2D2A4343FD2FBC6D79E5922BFF168E388B5F8343FD2FBC6D79E5922BFCCD42478431A353F3FABCC94D6DF22BFF168E388B5F8343F86C613419C8723BF94F6065F984C353F5F251FBB0B9424BFC9C7EE022505363F5F251FBB0B9424BFB77D8FFAEB15363FF168E388B5F824BF01A60C1CD0D2353F38842A357BA025BF01A60C1CD0D2353FA43330F2B22626BFC9C7EE022505363F11E335AFEAAC26BFDC114E0B5EF4353FA226FA7C941127BF13F06B2409C2353F58FE7C5BB05427BF13F06B2409C2353FC5AD8218E8DA27BF4ACE893DB48F353FA019C40776FC27BF5D18E945ED7E353F0CC9C9C4AD8228BFEE5BAD1397E3353F2F505260014C29BFEE5BAD1397E3353F745E6397A8DE2ABF263ACB2C42B1353F4FCAA48636002BBF38842A357BA0353F7044F7AC6BB42CBF03B34291EEE7343F4BB0389CF9D52CBFF168E388B5F8343FDAE6C6F484252EBF284701A260C6343FB55208E412472EBF3A9160AA99B5343FD7D9907F66102FBF84B9DDCB7D72343FD7D9907F66102FBF97033DD4B661343F0EF450DB865130BFCEE15AED612F343FA03715A930B630BF3D9E961FB8CA333FC5CBD3B9A29430BF99107349D576333F560F98874CF930BF643F8BA548BE323F43C5387F130A31BFAE6708C72C7B323FC5CBD3B9A29430BF41B8020AF5F4313FC5CBD3B9A29430BF8BE07F2BD9B1313F46D26EF4311F30BF317BD976DA1A313FA03715A930B630BF6859F78F85E8303FB28174B169A530BF8DEDB5A0F7C6303F213EB0E3BF4030BFC5CBD3B9A294303F213EB0E3BF4030BFC5CBD3B9A294303FB28174B169A530BF0EF450DB8651303F317BD976DA1A31BFFCA9F1D24D62303FB0743E3C4B9031BF581CCEFC6A0E303F664CC11A67D331BFB345D26EF4312F3F41B8020AF5F431BFD7D990
7F66102F3FD2FBC6D79E5932BF6B2A8BC22E8A2E3FD2FBC6D79E5932BF240F441669E22D3F1C2444F9821632BF4BB0389CF9D52C3FE54526E0D74832BF7044F7AC6BB42C3F78962023A0C231BF0395F1EF332E2C3F9D2ADF3312A131BF0395F1EF332E2C3F41B8020AF5F431BFE31A9FC9FE792A3F41B8020AF5F431BF766B990CC7F3293FE54526E0D74832BF54E41071732A293F1C2444F9821632BFE7340BB43BA4283FE54526E0D74832BF7B8505F7031E283F2F6EA301BC0532BFEA4141295AB9273F540262122EE431BFEA4141295AB9273FB0743E3C4B9031BFEA4141295AB9273F43C5387F130A31BFEA4141295AB9273F8DEDB5A0F7C630BFEC4E779E78CE263F7BA35698BED730BF5D18E945ED7E253F33880FECF82F30BF5D18E945ED7E253F1FF5D72B2CB82FBF15FDA19927D7243FD5CC5A0A48FB2FBF84B9DDCB7D72243FD5CC5A0A48FB2FBFA94D9CDCEF50243FB345D26EF4312FBF180AD80E46EC233FD7D9907F66102FBF3D9E961FB8CA233FB55208E412472EBF3A9160AA99B5243F240F441669E22DBF3A9160AA99B5243F4BB0389CF9D52CBFA7406667D13B253F4DBD6E1118EB2BBFCCD42478431A253FE10D6954E0642BBFCCD42478431A253F745E6397A8DE2ABF180AD80E46EC233FE10D6954E0642BBF3D9E961FB8CA233F99F221A81ABD2ABF1A170E846401233F2A36E675C4212BBF643F8BA548BE223F4FCAA48636002BBFD2FBC6D79E59223FE527D53E1D8F29BF664CC11A67D3213FE527D53E1D8F29BF1F317A6EA12B213FE7340BB43BA428BF1C2444F98216223F336ABE4A3E7627BFF78F85E81038223F809F71E1404826BF89D349B6BA9C223FC9C7EE02250526BF89D349B6BA9C223F38842A357BA025BF8BE07F2BD9B1213FC9C7EE02250526BFD508FD4CBD6E213FEE5BAD1397E325BF8DEDB5A0F7C6203F5B0BB3D0CE6926BF1FF5D72B2CB81F3F82ACA7565F5D25BF240F441669E21D3F38842A357BA025BF240F441669E21D3F15FDA19927D724BF72512D228AC91B3FF168E388B5F824BF72512D228AC91B3FA94D9CDCEF5024BF05A2276552431B3F3D9E961FB8CA23BFBC79AA436E861B3FD0EE9062804423BFDE0033DFC14F1C3F3FABCC94D6DF22BFDE0033DFC14F1C3FF78F85E8103822BFE31A9FC9FE791A3F89D349B6BA9C22BF2D431CEBE2361A3F5F251FBB0B9424BFE31A9FC9FE791A3FA7406667D13B25BFC093162EABB0193F38842A357BA025BF315D88D51F61183F82ACA7565F5D25BFC5AD8218E8DA173F3A9160AA99B524BF7B8505F7031E083F3A9160AA99B524BF7B8505F7031E083F82ACA7565F5D25BF05A2276552430B3F38842A357BA025BFFA9CBB5D2F4D113FA43330F2B22626BF84B9DDCB7D72143FA226FA7C941127BF58FE7C5BB054173F58FE7C5BB05427BF54E41071732A193F0FD6FF39CC9727BF0ABC934F8F6D193F7B8505F7031E28BF0FD6FF39CC97173F7978CF81E50829BF13F06B2409C2153F2F505260014C29BF3FABCC94D6DF123F99F221A81ABD2ABF43C5387F130A113FE10D6954E0642BBFC093162EABB0093FDE0033DFC14F2CBF213EB0E3BF40003FDCF3FC69A33A2DBF54E41071732AC93E240F441669E22DBFA226FA7C9411F7BEDAE6C6F484252EBF7B8505F7031E08BFDAE6C6F484252EBFCEE15AED612F14BFDAE6C6F484252EBFA7406667D13B15BFBE86E0B88C9B2ABFA7406667D13B15BF7B8505F7031E28BF13F06B2409C215BF315D88D51F6128BF0FD6FF39CC9717BF56F146E6913F28BFE7340BB43BA418BFE7340BB43BA428BFE7340BB43BA418BF2F505260014C29BF0FD6FF39CC9717BF9BFF571D39D229BFC5AD8218E8DA17BF2D431CEBE2362ABF9E0C8E9257E718BF08AF5DDA70582ABF0ABC934F8F6D19BFE10D6954E0642BBF2D431CEBE2361ABF0395F1EF332E2CBF2829B000A60C1CBF4DBD6E1118EB2BBF95D8B5BDDD921CBF95D8B5BDDD922CBF4696CCB1BCAB1EBF7044F7AC6BB42CBFD5CC5A0A48FB1FBF0395F1EF332E2CBFB28174B169A520BFDE0033DFC14F2CBFD508FD4CBD6E21BF7044F7AC6BB42CBF41B8020AF5F421BFB96C74CE4F712CBF1C2444F9821622BF4DBD6E1118EB2BBF3FABCC94D6DF22BF4DBD6E1118EB2BBF1A170E84640123BFE10D6954E0642BBF613255302AA923BF2A36E675C4212BBFCEE15AED612F24BFBC79AA436E862BBF5F251FBB0B9424BF4FCAA48636002BBF82ACA7565F5D25BF4FCAA48636002BBF13F06B2409C225BFBC79AA436E862BBF5B0BB3D0CE6926BFBC79AA436E862BBF3677F4BF5C8B26BF99F221A81ABD2ABF11E335AFEAAC26BF2D431CEBE2362ABF7D923B6C223327BFE527D53E1D8F29BFA019C40776FC27BFC093162EABB029BF0CC9C9C4AD8228BF54E41071732A29BF54E41071732A29BF54E41071732A29BFE527D53E1D8F29BFC3A04CA3C9C528BFE527D53E1D8F29BFA019C40776FC27BF766B990CC
7F329BFC5AD8218E8DA27BF766B990CC7F329BFC7BAB88D06F026BFE527D53E1D8F29BFEE5BAD1397E325BF766B990CC7F329BF13F06B2409C225BF08AF5DDA70582ABFC7BAB88D06F026BF4FCAA48636002BBFA226FA7C941127BF05A2276552432BBF336ABE4A3E7627BF97E5EB32FCA72BBF0FD6FF39CC9727BF0395F1EF332E2CBF7D923B6C223327BF261C7A8B87F72CBFA226FA7C941127BF0188BB7A15192DBF809F71E1404826BF6D37C1374D9F2DBFC9C7EE02250526BF6D37C1374D9F2DBF84B9DDCB7D7224BFDAE6C6F484252EBF84B9DDCB7D7224BFDAE6C6F484252EBF613255302AA923BFB345D26EF4312FBFAE6708C72C7B22BFB345D26EF4312FBFB0743E3C4B9021BFFC6D4F90D8EE2EBF1F317A6EA12B21BFB345D26EF4312FBF6859F78F85E820BFB345D26EF4312FBF46D26EF4311F20BFFA60191BBAD92FBF46D26EF4311F20BF46D26EF4311F30BF43C5387F130A21BFFCA9F1D24D6230BF43C5387F130A21BFD71533C2DB8330BFB28174B169A520BF8DEDB5A0F7C630BFB28174B169A520BF6859F78F85E830BF46D26EF4311F20BF7BA35698BED730BFDAE6C6F484251EBFFA9CBB5D2F4D31BF95D8B5BDDD921CBF78962023A0C231BF240F441669E21DBF41B8020AF5F431BF0188BB7A15191DBF540262122EE431BFBC79AA436E861BBF0ADAE4F0492732BF4FCAA48636001BBF1C2444F9821632BF766B990CC7F319BF643F8BA548BE32BF2D431CEBE2361ABF1A170E84640133BF05A2276552431BBFBEA4315A475533BFC093162EABB019BFBEA4315A475533BF9E0C8E9257E718BF613255302AA933BFE7340BB43BA418BFBB97FBE4284034BF0FD6FF39CC9717BF726F7EC3448334BF54E41071732A19BFB98AC56F0A2B35BF315D88D51F6118BF6F62484E266E35BFA226FA7C941117BFC9C7EE02250536BF315D88D51F6118BF92E9D0E9793736BF7B8505F7031E18BF92E9D0E9793736BF13F06B2409C215BF48C153C8957A36BF5D18E945ED7E15BF48C153C8957A36BF180AD80E46EC13BF5B0BB3D0CE6936BFAB5AD2510E6613BF5B0BB3D0CE6936BF89D349B6BA9C12BFFE98D6A6B1BD36BF89D349B6BA9C12BF232D95B7239C36BF90BE49D3A0680EBFB5705985CD0037BF99F221A81ABD0ABFC7BAB88D06F036BF3677F4BF5C8B06BF7D923B6C223337BFF168E388B5F804BF21205F42058737BF3677F4BF5C8B06BFEA4141295AB937BFAB5AD2510E6603BF7B8505F7031E38BF180AD80E46EC03BF44A7E7DD585038BFD2FBC6D79E5902BF44A7E7DD585038BF05A227655243FBBE0CC9C9C4AD8238BF2D431CEBE236FABEB056ED9A90D638BF2D431CEBE236FABEC3A04CA3C9C538BF05A227655243EBBE419AB1683A3B39BFA226FA7C9411E7BE419AB1683A3B39BF00000000000000807978CF81E50839BF8DEDB5A0F7C6C03E7978CF81E50839BF54E41071732AE93EF8713447567E39BF54E41071732AE93ED3DD7536E49F39BF691D554D1075EF3EC093162EABB039BFC9C7EE022505F63E662E7079AC1939BFFA9CBB5D2F4D013F54E41071732A39BF180AD80E46EC033FFA7E6ABC749338BF05A2276552430B3F7B8505F7031E38BFDE0033DFC14F0C3F693BA6EECA2E38BF691D554D10750F3FC5AD8218E8DA37BF8DEDB5A0F7C6103F0FD6FF39CC9737BF8DEDB5A0F7C6103F90DC9A745B2237BF8DEDB5A0F7C6103F48C153C8957A36BF1C2444F98216123FA43330F2B22636BF1C2444F98216123F13F06B2409C235BFD2FBC6D79E59123F6F62484E266E35BFCEE15AED612F143F6F62484E266E35BFC9C7EE022505163F15FDA19927D734BF58FE7C5BB054173F97033DD4B66134BF0FD6FF39CC97173FF37519FED30D34BFE7340BB43BA4183F99107349D57633BFE7340BB43BA4183F86C613419C8733BF05A2276552431B3F2C616D8C9DF032BF4BB0389CF9D51C3F9B1DA9BEF38B32BF72512D228AC91B3F2F6EA301BC0532BF4FCAA48636001B3F1C2444F9821632BF90BE49D3A0681E3F7689EAAD81AD32BF4696CCB1BCAB1E3F7689EAAD81AD32BF213EB0E3BF40203FF78F85E8103832BF8DEDB5A0F7C6203FF78F85E8103832BF643F8BA548BE223F1C2444F9821632BF1A170E846401233F8BE07F2BD9B131BFF5824F73F222233FFA9CBB5D2F4D31BFF168E388B5F8243F1F317A6EA12B31BF38842A357BA0253FD71533C2DB8330BFA7406667D13B253F46D26EF4311F30BF11E335AFEAAC263F691D554D10752FBF3677F4BF5C8B263FFF7A8505F7032EBF0FD6FF39CC97273FDCF3FC69A33A2DBF0FD6FF39CC97273F4BB0389CF9D52CBF7978CF81E508293F0395F1EF332E2CBF2F505260014C293F97E5EB32FCA72BBFBE86E0B88C9B2A3F9BFF571D39D229BF0395F1EF332E2C3FE527D53E1D8F29BF0188BB7A15192D3FEA4141295AB927BFB96C74CE4F712C3FC5AD8218E8DA27BF72512D228AC92B3FE7340BB43BA4
28BFE31A9FC9FE792A3F315D88D51F6128BF9BFF571D39D2293F315D88D51F6128BF2F505260014C293FE7340BB43BA428BFA019C40776FC273FA019C40776FC27BFA019C40776FC273FA226FA7C941127BFEC4E779E78CE263F3677F4BF5C8B26BFEC4E779E78CE263F82ACA7565F5D25BF56F146E6913F283F15FDA19927D724BF315D88D51F61283FCEE15AED612F24BF9E0C8E9257E7283FAB5AD2510E6623BF7B8505F7031E283F1C2444F9821622BF7978CF81E508293FD71533C2DB8320BFC3A04CA3C9C5283FFCA9F1D24D6220BFC5AD8218E8DA273FFC6D4F90D8EE1EBFEA4141295AB9273F4696CCB1BCAB1EBF9E0C8E9257E7283FBC79AA436E861BBF7978CF81E508293FBC79AA436E861BBF97E5EB32FCA72B3FDE0033DFC14F1CBF95D8B5BDDD922C3F4FCAA48636001BBF4BB0389CF9D52C3F99F221A81ABD1ABFDAE6C6F484252E3FC5AD8218E8DA17BF21020EA14ACD2E3FEC4E779E78CE16BF6B2A8BC22E8A2E3FEC4E779E78CE16BF691D554D10752F3F13F06B2409C215BF581CCEFC6A0E303F613255302AA913BF33880FECF82F303F43C5387F130A11BFB345D26EF4312F3FFC6D4F90D8EE0EBFD7D9907F66102F3F90BE49D3A0680EBF4696CCB1BCAB2E3FF168E388B5F804BF6B2A8BC22E8A2E3F84B9DDCB7D7204BF4489963C9E962F3F691D554D1075FFBEFA60191BBAD92F3F691D554D1075FFBEFC6D4F90D8EE2E3F05A227655243FBBEFC6D4F90D8EE2E3F3FABCC94D6DFF2BE0188BB7A15192D3FF168E388B5F8E4BE0188BB7A15192D3FB75F3E59315CDDBEB96C74CE4F712C3F54E41071732AC9BEB96C74CE4F712C3F54E41071732AC93E97E5EB32FCA72B3FF168E388B5F8E43E0395F1EF332E2C3F54E41071732AE93E0188BB7A15192D3F664CC11A67D3F13E0188BB7A15192D3F691D554D1075FF3E6B2A8BC22E8A2E3F664CC11A67D3013F90BE49D3A0682E3F3FABCC94D6DF023F4489963C9E962F3F43C5387F130A113F8EB1135E82532F3F1C2444F98216123FFA60191BBAD92F3FD2FBC6D79E59123F213EB0E3BF40303F8DEDB5A0F7C6103F213EB0E3BF40303FDE0033DFC14F0C3FA03715A930B6303F240F441669E20D3F43C5387F130A313F240F441669E20D3F2F6EA301BC05323F691D554D10750F3F0ADAE4F04927323F8DEDB5A0F7C6103F0CE71A66683C313F1C2444F98216123F1F317A6EA12B313FF5824F73F222133F7BA35698BED7303FC9C7EE022505163F7BA35698BED7303F10000000F5824F73F22213BFC7BAB88D06F026BFFA9CBB5D2F4D11BF11E335AFEAAC26BF3677F4BF5C8B06BFEE5BAD1397E325BF664CC11A67D3F1BE5D18E945ED7E25BF8DEDB5A0F7C6F0BE5F251FBB0B9424BF240F441669E20DBF5F251FBB0B9424BF90BE49D3A0680EBF5D18E945ED7E25BF213EB0E3BF4010BF13F06B2409C225BFFA9CBB5D2F4D11BF5D18E945ED7E25BFB0743E3C4B9011BF180AD80E46EC23BF3FABCC94D6DF12BF180AD80E46EC23BF3FABCC94D6DF12BF5F251FBB0B9424BF664CC11A67D311BFF168E388B5F824BFD2FBC6D79E5912BF38842A357BA025BFD2FBC6D79E5912BF5B0BB3D0CE6926BFF5824F73F22213BFC7BAB88D06F026BF06000000B75F3E59315CDDBE5F251FBB0B9424BFB75F3E59315CDDBEA7406667D13B25BF691D554D1075EF3ECCD42478431A25BFAB5AD2510E66033FA7406667D13B25BFAB5AD2510E66033F3A9160AA99B524BFB75F3E59315CDDBE5F251FBB0B9424BF')")
db.execute("insert into ne_50m_admin_0_countries (gid, name, name_long, iso_a2, iso_a3, geom) values (2, 'USA', 'United States', 'US', 'USA', ST_SetSRID('MULTIPOLYGON(((-123.6 49.6, -65.3 49.6, -65.3 24.0, -123.6 24.0, -123.6 49.6)))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (1, 0, 0, 1, st_setsrid('polygon(( 0 0, 0 1, 1 1, 1 0, 0 0))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (2, 0, -1, 1, st_setsrid('polygon(( 0 -1, 0 0, 1 0, 1 -1, 0 -1))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (3, -1, -1, 1, st_setsrid('polygon((-1 -1, -1 0, 0 0, 0 -1, -1 -1))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (4, -1, 0, 1, st_setsrid('polygon((-1 0, -1 1, 0 1, 0 0, -1 0))'::geometry, 4326))")
db.execute("insert into boxes (id, lon, lat, size, geom) values (5, -99, 39, 1, st_setsrid('polygon((-99 38, -99 39, -98 39, -98 38, -99 38))'::geometry, 4326))")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 1, 2000, 800)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 2, 4000, 600)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 3, 6000, 400)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('XX', 4, 8000, 200)")
db.execute("insert into gpwv4_2015 (iso_a2, box_id, population, area) values ('US', 5, 17907, 9540)")
db.execute("insert into acs5yr_2015 (usps_code, box_id, population, area) values ('KS', 5, 17907, 9540)")
def test_guess_iso_a2(self):
get_iso3166 = lambda n: 'XX' if (n == 'ISO 3166') else None
get_iso3166_2 = lambda n: 'YY-YY' if (n == 'ISO 3166-2') else None
get_us_census = lambda n: '06001' if (n == 'US Census GEOID') else None
get_intl_src_path = lambda n: 'sources/xx/yy.json' if (n == 'source paths') else None
get_us_src_path = lambda n: 'sources/us/ca/oakland.json' if (n == 'source paths') else None
feature = unittest.mock.Mock()
feature.GetField = get_iso3166
self.assertEqual(calculate.guess_iso_a2(feature), 'XX')
feature.GetField = get_iso3166_2
self.assertEqual(calculate.guess_iso_a2(feature), 'YY')
feature.GetField = get_us_census
self.assertEqual(calculate.guess_iso_a2(feature), 'US')
feature.GetField = get_intl_src_path
self.assertEqual(calculate.guess_iso_a2(feature), 'XX')
feature.GetField = get_us_src_path
self.assertEqual(calculate.guess_iso_a2(feature), 'US')
def test_guess_state_abbrev(self):
get_us_census = lambda n: '06001' if (n == 'US Census GEOID') else None
get_intl_src_path = lambda n: 'sources/xx/yy.json' if (n == 'source paths') else None
get_us_src_path = lambda n: 'sources/us/ca/oakland.json' if (n == 'source paths') else None
feature = unittest.mock.Mock()
feature.GetField = get_us_census
self.assertEqual(calculate.guess_state_abbrev(feature), 'CA')
feature.GetField = get_intl_src_path
self.assertIsNone(calculate.guess_state_abbrev(feature))
feature.GetField = get_us_src_path
self.assertEqual(calculate.guess_state_abbrev(feature), 'CA')
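    # test_calculate below stubs both network fetches with HTTMock (the results
    # index JSON and the rendered GeoJSON) and checks the per-country aggregates
    # against the GPWv4/ACS fixture rows inserted in setUp()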
def test_calculate(self):
def response_geojson(url, request):
if (request.method, url.hostname, url.path) == ('GET', 'results.openaddresses.io', '/index.json'):
return response(200, b'{"render_geojson_url": "http://data.openaddresses.io/render-world.geojson"}', headers={'Content-Type': 'application/json'})
if (request.method, url.hostname, url.path) == ('GET', 'data.openaddresses.io', '/render-world.geojson'):
null_geojson = '''{\n"type": "FeatureCollection",\n"features": [\n{ "type": "Feature", "properties": {"source count": 1, "name": "Null Island", "source dates": "2017-03-12 21:54:49.107291+00:00", "source paths": "sources/xx/countrywide.json", "ISO 3166": "XX", "ISO 3166-2": null, "US Census GEOID": null, "status": "good", "address count": 9990}, "geometry": { "type": "MultiPolygon", "coordinates": [ [ [ [ -0.000478, 0.000015 ], [ -0.000464, 0.000017 ], [ -0.000463, 0.000009 ], [ -0.000459, 0.000008 ], [ -0.000455, 0.000017 ], [ -0.000443, 0.000012 ], [ -0.000446, 0.000004 ], [ -0.000444, 0.0 ], [ -0.000433, 0.000003 ], [ -0.00043, -0.00001 ], [ -0.000438, -0.000013 ], [ -0.000438, -0.000016 ], [ -0.000428, -0.000017 ], [ -0.000429, -0.000038 ], [ -0.000438, -0.000038 ], [ -0.000435, -0.000043 ], [ -0.000445, -0.000052 ], [ -0.000449, -0.000049 ], [ -0.000455, -0.00005 ], [ -0.000455, -0.000057 ], [ -0.000469, -0.000057 ], [ -0.000469, -0.000049 ], [ -0.000474, -0.000049 ], [ -0.00048, -0.000055 ], [ -0.000489, -0.000047 ], [ -0.000488, -0.000044 ], [ -0.000496, -0.000034 ], [ -0.000491, -0.000028 ], [ -0.000491, -0.000026 ], [ -0.0005, -0.000024 ], [ -0.000496, -0.00001 ], [ -0.000492, -0.00001 ], [ -0.000492, -0.000006 ], [ -0.000495, -0.0 ], [ -0.000484, 0.000009 ], [ -0.000481, 0.000006 ], [ -0.000477, 0.000007 ], [ -0.000477, 0.000011 ], [ -0.000478, 0.000015 ] ] ], [ [ [ 0.000084, 0.000257 ], [ 0.000107, 0.000285 ], [ 0.000121, 0.000274 ], [ 0.000149, 0.000277 ], [ 0.000183, 0.00028 ], [ 0.000177, 0.000285 ], [ 0.00019, 0.0003 ], [ 0.000195, 0.000296 ], [ 0.000204, 0.000302 ], [ 0.000212, 0.000303 ], [ 0.000221, 0.000302 ], [ 0.000228, 0.000307 ], [ 0.000243, 0.000292 ], [ 0.000239, 0.000288 ], [ 0.000243, 0.000278 ], [ 0.000243, 0.000265 ], [ 0.000249, 0.000262 ], [ 0.000238, 0.000244 ], [ 0.000233, 0.000246 ], [ 0.000225, 0.000241 ], [ 0.00021, 0.00024 ], [ 0.000202, 0.000242 ], [ 0.000196, 0.000237 ], [ 0.000183, 0.000247 ], [ 0.000186, 0.000251 ], [ 0.000129, 0.000246 ], [ 0.000136, 0.000245 ], [ 0.000146, 0.000233 ], [ 0.000144, 0.000227 ], [ 0.000148, 0.000225 ], [ 0.000151, 0.000218 ], [ 0.000155, 0.000218 ], [ 0.00016, 0.000223 ], [ 0.000174, 0.000217 ], [ 0.000181, 0.000221 ], [ 0.000185, 0.000221 ], [ 0.000187, 0.000216 ], [ 0.000211, 0.000217 ], [ 0.000216, 0.000213 ], [ 0.000228, 0.000203 ], [ 0.000239, 0.000203 ], [ 0.000248, 0.000199 ], [ 0.000258, 0.000199 ], [ 0.000259, 0.000196 ], [ 0.000258, 0.00019 ], [ 0.000261, 0.000185 ], [ 0.000262, 0.000166 ], [ 0.000258, 0.000158 ], [ 0.000239, 0.000158 ], [ 0.000238, 0.000144 ], [ 0.000242, 0.000141 ], [ 0.000246, 0.000145 ], [ 0.000251, 0.000144 ], [ 0.000253, 0.000139 ], [ 0.000257, 0.000141 ], [ 0.000262, 0.000139 ], [ 0.000258, 0.000132 ], [ 0.000259, 0.000127 ], [ 0.000265, 0.000125 ], [ 0.00027, 0.000121 ], [ 0.000274, 0.000124 ], [ 0.000281, 0.000125 ], [ 0.000285, 0.00012 ], [ 0.000292, 0.00012 ], [ 0.000296, 0.000116 ], [ 0.000296, 0.000107 ], [ 0.000301, 0.000107 ], [ 0.000304, 0.00011 ], [ 0.000309, 0.00011 ], [ 0.000315, 0.000103 ], [ 0.000316, 0.000095 ], [ 0.000323, 0.000091 ], [ 0.000328, 0.000094 ], [ 0.000333, 0.000091 ], [ 0.000337, 0.000086 ], [ 0.000344, 0.000082 ], [ 0.000348, 0.000082 ], [ 0.00035, 0.000079 ], [ 0.000351, 0.000074 ], [ 0.000356, 0.000073 ], [ 0.000356, 0.000077 ], [ 0.000359, 0.000077 ], [ 0.000363, 0.000072 ], [ 0.000363, 0.00007 ], [ 0.000373, 0.00007 ], [ 0.000379, 0.000068 ], [ 0.000379, 0.000064 ], [ 0.000369, 0.000052 ], [ 0.000372, 0.000049 ], [ 0.000372, 0.000043 ], [ 0.000368, 
0.000038 ], [ 0.000363, 0.000017 ], [ 0.000355, 0.000019 ], [ 0.000347, 0.000008 ], [ 0.000335, 0.000015 ], [ 0.000331, 0.000012 ], [ 0.000331, 0.000006 ], [ 0.000325, 0.000006 ], [ 0.000329, -0.000008 ], [ 0.000322, -0.000011 ], [ 0.000322, -0.000019 ], [ 0.000318, -0.00002 ], [ 0.000318, -0.000028 ], [ 0.000312, -0.000029 ], [ 0.000306, -0.000024 ], [ 0.000303, -0.000025 ], [ 0.000305, -0.000037 ], [ 0.000297, -0.000038 ], [ 0.000297, -0.000044 ], [ 0.000285, -0.000044 ], [ 0.000285, -0.000048 ], [ 0.000279, -0.000049 ], [ 0.000276, -0.000055 ], [ 0.000278, -0.00006 ], [ 0.000278, -0.000067 ], [ 0.000281, -0.000066 ], [ 0.000285, -0.00007 ], [ 0.000295, -0.000064 ], [ 0.000298, -0.000071 ], [ 0.000304, -0.00007 ], [ 0.000306, -0.000074 ], [ 0.000304, -0.000079 ], [ 0.000308, -0.000079 ], [ 0.00031, -0.000093 ], [ 0.000305, -0.000094 ], [ 0.000305, -0.000099 ], [ 0.000298, -0.000105 ], [ 0.000298, -0.00011 ], [ 0.0003, -0.00011 ], [ 0.000304, -0.000106 ], [ 0.00031, -0.000109 ], [ 0.000306, -0.000116 ], [ 0.00031, -0.000125 ], [ 0.000311, -0.000129 ], [ 0.000309, -0.000133 ], [ 0.000315, -0.00014 ], [ 0.00032, -0.00014 ], [ 0.000322, -0.000144 ], [ 0.00032, -0.000149 ], [ 0.000325, -0.000157 ], [ 0.000336, -0.000157 ], [ 0.000337, -0.00016 ], [ 0.000333, -0.000165 ], [ 0.000333, -0.000169 ], [ 0.000336, -0.000173 ], [ 0.000335, -0.000176 ], [ 0.000332, -0.000178 ], [ 0.000332, -0.000182 ], [ 0.000329, -0.000183 ], [ 0.000328, -0.000187 ], [ 0.000334, -0.000193 ], [ 0.000334, -0.000205 ], [ 0.000331, -0.000206 ], [ 0.00033, -0.000219 ], [ 0.000319, -0.00022 ], [ 0.00032, -0.00023 ], [ 0.000317, -0.000231 ], [ 0.000316, -0.000237 ], [ 0.000312, -0.000237 ], [ 0.000311, -0.000249 ], [ 0.000308, -0.000255 ], [ 0.000302, -0.000253 ], [ 0.000297, -0.000259 ], [ 0.000286, -0.00026 ], [ 0.000282, -0.000253 ], [ 0.000274, -0.000253 ], [ 0.00027, -0.000246 ], [ 0.000261, -0.000255 ], [ 0.000258, -0.000254 ], [ 0.000256, -0.000248 ], [ 0.000253, -0.000248 ], [ 0.000253, -0.000254 ], [ 0.000249, -0.000261 ], [ 0.00025, -0.000268 ], [ 0.000245, -0.000272 ], [ 0.000238, -0.000274 ], [ 0.000237, -0.00028 ], [ 0.000233, -0.00028 ], [ 0.000228, -0.000276 ], [ 0.00022, -0.000279 ], [ 0.000219, -0.000271 ], [ 0.000215, -0.000269 ], [ 0.000215, -0.000274 ], [ 0.000202, -0.000274 ], [ 0.000198, -0.000279 ], [ 0.000192, -0.000276 ], [ 0.000188, -0.000279 ], [ 0.000184, -0.000275 ], [ 0.000181, -0.000273 ], [ 0.000181, -0.000268 ], [ 0.000181, -0.00026 ], [ 0.000181, -0.000256 ], [ 0.000174, -0.000257 ], [ 0.000164, -0.000247 ], [ 0.000164, -0.000242 ], [ 0.000159, -0.000244 ], [ 0.000156, -0.000244 ], [ 0.000155, -0.000238 ], [ 0.000152, -0.000237 ], [ 0.000151, -0.000231 ], [ 0.000158, -0.000228 ], [ 0.000158, -0.00022 ], [ 0.000162, -0.000213 ], [ 0.000161, -0.000209 ], [ 0.000161, -0.000205 ], [ 0.000152, -0.000209 ], [ 0.000151, -0.000204 ], [ 0.000145, -0.000207 ], [ 0.000143, -0.000206 ], [ 0.00014, -0.000195 ], [ 0.000136, -0.000195 ], [ 0.000131, -0.000188 ], [ 0.000138, -0.000179 ], [ 0.000139, -0.00017 ], [ 0.000142, -0.000168 ], [ 0.000142, -0.000165 ], [ 0.000135, -0.000168 ], [ 0.000133, -0.000167 ], [ 0.000128, -0.000171 ], [ 0.000121, -0.000163 ], [ 0.000114, -0.000165 ], [ 0.000114, -0.000159 ], [ 0.000106, -0.00016 ], [ 0.000106, -0.000155 ], [ 0.000104, -0.000151 ], [ 0.000105, -0.000147 ], [ 0.000108, -0.000144 ], [ 0.000108, -0.000139 ], [ 0.000101, -0.000142 ], [ 0.0001, -0.000157 ], [ 0.000101, -0.000162 ], [ 0.000098, -0.000165 ], [ 0.000093, -0.000163 ], [ 0.000091, -0.000158 ], [ 
0.000046, -0.000158 ], [ 0.000046, -0.000163 ], [ 0.000052, -0.000165 ], [ 0.000066, -0.000169 ], [ 0.000078, -0.000176 ], [ 0.000089, -0.000178 ], [ 0.000096, -0.00018 ], [ 0.000097, -0.000184 ], [ 0.00009, -0.000191 ], [ 0.000083, -0.000193 ], [ 0.000072, -0.000204 ], [ 0.000065, -0.000209 ], [ 0.000049, -0.000216 ], [ 0.000031, -0.000223 ], [ 0.000003, -0.000228 ], [ -0.000022, -0.00023 ], [ -0.000046, -0.00023 ], [ -0.000077, -0.00023 ], [ -0.000081, -0.000203 ], [ -0.000081, -0.000184 ], [ -0.000083, -0.000186 ], [ -0.00009, -0.000185 ], [ -0.000094, -0.000188 ], [ -0.000094, -0.000193 ], [ -0.00009, -0.000197 ], [ -0.000091, -0.0002 ], [ -0.000095, -0.000201 ], [ -0.000097, -0.000209 ], [ -0.0001, -0.000215 ], [ -0.000107, -0.000213 ], [ -0.000109, -0.000218 ], [ -0.000117, -0.000219 ], [ -0.000122, -0.000215 ], [ -0.000127, -0.000216 ], [ -0.000133, -0.000219 ], [ -0.000137, -0.000217 ], [ -0.000138, -0.000213 ], [ -0.000144, -0.000213 ], [ -0.000145, -0.000209 ], [ -0.00015, -0.000207 ], [ -0.000154, -0.00021 ], [ -0.000157, -0.000206 ], [ -0.000163, -0.000206 ], [ -0.000166, -0.00021 ], [ -0.000171, -0.00021 ], [ -0.000172, -0.000204 ], [ -0.000173, -0.0002 ], [ -0.000177, -0.000195 ], [ -0.000183, -0.000196 ], [ -0.000187, -0.000192 ], [ -0.000192, -0.000192 ], [ -0.000195, -0.000189 ], [ -0.000195, -0.000183 ], [ -0.000198, -0.000182 ], [ -0.000198, -0.000175 ], [ -0.000195, -0.000167 ], [ -0.000198, -0.000166 ], [ -0.000201, -0.000175 ], [ -0.000206, -0.000176 ], [ -0.000208, -0.000179 ], [ -0.000211, -0.00018 ], [ -0.000215, -0.000177 ], [ -0.000221, -0.000176 ], [ -0.000222, -0.00017 ], [ -0.000226, -0.000168 ], [ -0.000226, -0.000156 ], [ -0.00023, -0.000156 ], [ -0.00023, -0.00015 ], [ -0.000238, -0.000141 ], [ -0.000238, -0.000134 ], [ -0.000236, -0.000131 ], [ -0.000238, -0.000129 ], [ -0.000238, -0.000123 ], [ -0.000243, -0.000123 ], [ -0.000246, -0.00013 ], [ -0.00025, -0.00013 ], [ -0.000252, -0.000127 ], [ -0.000256, -0.000127 ], [ -0.000258, -0.000123 ], [ -0.000257, -0.000115 ], [ -0.000264, -0.000109 ], [ -0.000271, -0.000114 ], [ -0.000274, -0.000111 ], [ -0.000273, -0.000105 ], [ -0.000277, -0.000103 ], [ -0.000276, -0.000099 ], [ -0.000286, -0.0001 ], [ -0.00029, -0.000104 ], [ -0.000295, -0.000098 ], [ -0.000295, -0.000095 ], [ -0.0003, -0.000094 ], [ -0.000309, -0.00009 ], [ -0.000313, -0.000096 ], [ -0.000323, -0.000093 ], [ -0.000327, -0.000088 ], [ -0.000336, -0.000093 ], [ -0.000339, -0.000092 ], [ -0.000339, -0.000083 ], [ -0.000343, -0.000082 ], [ -0.000343, -0.000076 ], [ -0.000342, -0.000074 ], [ -0.000342, -0.000071 ], [ -0.000347, -0.000071 ], [ -0.000345, -0.000058 ], [ -0.000351, -0.000051 ], [ -0.00035, -0.000043 ], [ -0.000354, -0.00004 ], [ -0.000359, -0.000043 ], [ -0.000362, -0.000037 ], [ -0.000368, -0.000038 ], [ -0.000371, -0.000035 ], [ -0.000371, -0.000026 ], [ -0.000374, -0.000025 ], [ -0.000379, -0.000025 ], [ -0.000378, -0.000013 ], [ -0.000385, -0.000011 ], [ -0.000385, -0.0 ], [ -0.000382, 0.000002 ], [ -0.000382, 0.000012 ], [ -0.000389, 0.000012 ], [ -0.000391, 0.000015 ], [ -0.000392, 0.000021 ], [ -0.000383, 0.000033 ], [ -0.000384, 0.000038 ], [ -0.000375, 0.000052 ], [ -0.000368, 0.000054 ], [ -0.000369, 0.00006 ], [ -0.000364, 0.000064 ], [ -0.00036, 0.000064 ], [ -0.000353, 0.000064 ], [ -0.000343, 0.000069 ], [ -0.000338, 0.000069 ], [ -0.000332, 0.00007 ], [ -0.000327, 0.000077 ], [ -0.000327, 0.000084 ], [ -0.000318, 0.000089 ], [ -0.000311, 0.00009 ], [ -0.000306, 0.000094 ], [ -0.000297, 0.000094 ], [ -0.000298, 0.000104 
], [ -0.000289, 0.00011 ], [ -0.000283, 0.000106 ], [ -0.000275, 0.000103 ], [ -0.000276, 0.000116 ], [ -0.000285, 0.000117 ], [ -0.000285, 0.000124 ], [ -0.000278, 0.000128 ], [ -0.000278, 0.000143 ], [ -0.000276, 0.000145 ], [ -0.00027, 0.000146 ], [ -0.000264, 0.00016 ], [ -0.000262, 0.000165 ], [ -0.000252, 0.000162 ], [ -0.000246, 0.000173 ], [ -0.00024, 0.000172 ], [ -0.000229, 0.00018 ], [ -0.000223, 0.00018 ], [ -0.00022, 0.000191 ], [ -0.000215, 0.000193 ], [ -0.000211, 0.000203 ], [ -0.000197, 0.000215 ], [ -0.000195, 0.000222 ], [ -0.000181, 0.000217 ], [ -0.000182, 0.000212 ], [ -0.000188, 0.000202 ], [ -0.000186, 0.000197 ], [ -0.000186, 0.000193 ], [ -0.000188, 0.000183 ], [ -0.000183, 0.000183 ], [ -0.000176, 0.000174 ], [ -0.000172, 0.000174 ], [ -0.000163, 0.000185 ], [ -0.000159, 0.000186 ], [ -0.000154, 0.00019 ], [ -0.000148, 0.000184 ], [ -0.000138, 0.000191 ], [ -0.000126, 0.000189 ], [ -0.000125, 0.000182 ], [ -0.000118, 0.000181 ], [ -0.000117, 0.00019 ], [ -0.000105, 0.000191 ], [ -0.000105, 0.000211 ], [ -0.000108, 0.000218 ], [ -0.000103, 0.00022 ], [ -0.000102, 0.00023 ], [ -0.000091, 0.000235 ], [ -0.000087, 0.000233 ], [ -0.000087, 0.00024 ], [ -0.000083, 0.000245 ], [ -0.000075, 0.000247 ], [ -0.000065, 0.000238 ], [ -0.000059, 0.000237 ], [ -0.000058, 0.000234 ], [ -0.00004, 0.000233 ], [ -0.000039, 0.000241 ], [ -0.00003, 0.000243 ], [ -0.00003, 0.000236 ], [ -0.000026, 0.000236 ], [ -0.000018, 0.000222 ], [ -0.00001, 0.000222 ], [ -0.000007, 0.000217 ], [ -0.000003, 0.000217 ], [ 0.000003, 0.000211 ], [ 0.00001, 0.000215 ], [ 0.000012, 0.000222 ], [ 0.000017, 0.000222 ], [ 0.00003, 0.000233 ], [ 0.000034, 0.000232 ], [ 0.000036, 0.000241 ], [ 0.000065, 0.000239 ], [ 0.000069, 0.000243 ], [ 0.00007, 0.000248 ], [ 0.000064, 0.000248 ], [ 0.000054, 0.000255 ], [ 0.000057, 0.00026 ], [ 0.000057, 0.000275 ], [ 0.00006, 0.000277 ], [ 0.000064, 0.000263 ], [ 0.000069, 0.000262 ], [ 0.000073, 0.000257 ], [ 0.000084, 0.000257 ] ], [ [ -0.000073, -0.000175 ], [ -0.000066, -0.000173 ], [ -0.000043, -0.000167 ], [ -0.000017, -0.000164 ], [ -0.000016, -0.000157 ], [ -0.000057, -0.000157 ], [ -0.000058, -0.000164 ], [ -0.000062, -0.000166 ], [ -0.000066, -0.000164 ], [ -0.000067, -0.000152 ], [ -0.000072, -0.000152 ], [ -0.000072, -0.000157 ], [ -0.000068, -0.00016 ], [ -0.00007, -0.000165 ], [ -0.00007, -0.000171 ], [ -0.000073, -0.000175 ] ], [ [ -0.000007, -0.000157 ], [ -0.000007, -0.000162 ], [ 0.000015, -0.000161 ], [ 0.000037, -0.000162 ], [ 0.000037, -0.000158 ], [ -0.000007, -0.000157 ] ] ] ] } }, { "type": "Feature", "properties": {"source count": 1, "name": "Null Ranch", "source dates": "2017-03-12 21:54:49.107291+00:00", "source paths": "sources/us/ks/null-ranch.json", "ISO 3166": null, "ISO 3166-2": null, "US Census GEOID": null, "status": "good", "address count": 9}, "geometry": { "type": "Polygon", "coordinates": [[[-99, 38], [-99, 39], [-98, 39], [-98, 38], [-99, 38]]] } }\n]\n}\n'''
return response(200, null_geojson.encode('utf8'), headers={'Content-Type': 'application/json'})
raise Exception()
with HTTMock(response_geojson):
calculate.calculate(DATABASE_URL)
with psycopg2.connect(DATABASE_URL) as conn:
with conn.cursor() as db:
db.execute('select iso_a2, addr_count, area_total, area_pct, pop_total, pop_pct from areas order by iso_a2')
(row1, row2) = db.fetchall()
self.assertEqual(row1, ('US', 9, 9540, 1.0, 17907, 1.0))
self.assertEqual(row2, ('XX', 9990, 2000, 1.0, 20000, 1.0))
| [
"[email protected]"
]
| |
b26444ad2d6f2216e041816a9cd9a0238f7491e6 | 6d493d09085d4d398132204925078a179774f138 | /melgan_vocoder.py | 2ec8f713892afcce0d01ff4faa4f26ebc87935ea | [
"MIT"
]
| permissive | zongxiangli/CycleGAN-VC3 | 6a41f843b430fd307d9ea0b43aa5910816fba450 | 431b332fa17638391ca913e6821b526456fd874f | refs/heads/main | 2023-02-21T02:19:39.058010 | 2021-01-25T09:49:00 | 2021-01-25T09:49:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,448 | py | #!python
# -*- coding: utf-8 -*-
import os
import numpy as np  # needed by Generator.hop_length (np.prod over the ratios)
import yaml
from pathlib import Path
import torch
import torch.nn as nn
from torch.nn.utils import weight_norm
from feature_utils import Audio2Mel
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def WNConv1d(*args, **kwargs):
return weight_norm(nn.Conv1d(*args, **kwargs))
def WNConvTranspose1d(*args, **kwargs):
return weight_norm(nn.ConvTranspose1d(*args, **kwargs))
class ResnetBlock(nn.Module):
def __init__(self, dim, dilation=1):
super().__init__()
self.block = nn.Sequential(
nn.LeakyReLU(0.2),
nn.ReflectionPad1d(dilation),
WNConv1d(dim, dim, kernel_size=3, dilation=dilation),
nn.LeakyReLU(0.2),
WNConv1d(dim, dim, kernel_size=1),
)
self.shortcut = WNConv1d(dim, dim, kernel_size=1)
def forward(self, x):
return self.shortcut(x) + self.block(x)
class Generator(nn.Module):
def __init__(self, input_size, ngf, n_residual_layers):
super().__init__()
ratios = [8, 8, 2, 2]
self.hop_length = np.prod(ratios)
mult = int(2 ** len(ratios))
model = [
nn.ReflectionPad1d(3),
WNConv1d(input_size, mult * ngf, kernel_size=7, padding=0),
]
# Upsample to raw audio scale
for i, r in enumerate(ratios):
model += [
nn.LeakyReLU(0.2),
WNConvTranspose1d(
mult * ngf,
mult * ngf // 2,
kernel_size=r * 2,
stride=r,
padding=r // 2 + r % 2,
output_padding=r % 2,
),
]
for j in range(n_residual_layers):
model += [ResnetBlock(mult * ngf // 2, dilation=3 ** j)]
mult //= 2
model += [
nn.LeakyReLU(0.2),
nn.ReflectionPad1d(3),
WNConv1d(ngf, 1, kernel_size=7, padding=0),
nn.Tanh(),
]
self.model = nn.Sequential(*model)
self.apply(weights_init)
def forward(self, x):
return self.model(x)
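# Shape sketch: the four ratios multiply to a 256x upsampling factor, e.g.
#   netG = Generator(input_size=80, ngf=32, n_residual_layers=3)
#   netG(torch.randn(1, 80, 100)).shape  # -> torch.Size([1, 1, 25600])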
def get_default_device():
if torch.cuda.is_available():
return "cuda"
else:
return "cpu"
def load_model(mel2wav_path, device=get_default_device()):
"""
Args:
mel2wav_path (str or Path): path to the root folder of dumped text2mel
device (str or torch.device): device to load the model
"""
root = Path(mel2wav_path)
with open(root / "args.yml", "r") as f:
args = yaml.load(f, Loader=yaml.FullLoader)
netG = Generator(args.n_mel_channels, args.ngf, args.n_residual_layers).to(device)
netG.load_state_dict(torch.load(root / "best_netG.pt", map_location=device))
return netG
class MelVocoder:
def __init__(
self,
path,
device=get_default_device(),
github=False,
model_name="multi_speaker",
):
self.fft = Audio2Mel().to(device)
if github:
netG = Generator(80, 32, 3).to(device)
root = Path(os.path.dirname(__file__)).parent
netG.load_state_dict(
torch.load(root / f"models/{model_name}.pt", map_location=device)
)
self.mel2wav = netG
else:
self.mel2wav = load_model(path, device)
self.device = device
def __call__(self, audio):
"""
Performs audio to mel conversion (See Audio2Mel in mel2wav/modules.py)
Args:
audio (torch.tensor): PyTorch tensor containing audio (batch_size, timesteps)
Returns:
torch.tensor: log-mel-spectrogram computed on input audio (batch_size, 80, timesteps)
"""
return self.fft(audio.unsqueeze(1).to(self.device))
def inverse(self, mel):
"""
Performs mel2audio conversion
Args:
mel (torch.tensor): PyTorch tensor containing log-mel spectrograms (batch_size, 80, timesteps)
Returns:
torch.tensor: Inverted raw audio (batch_size, timesteps)
"""
with torch.no_grad():
return self.mel2wav(mel.to(self.device)).squeeze(1)
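# Minimal usage sketch (the checkpoint path is an assumption):
#   vocoder = MelVocoder("checkpoints/mel2wav")
#   mel = vocoder(torch.randn(2, 22050))  # raw audio -> (2, 80, T) log-mel
#   wav = vocoder.inverse(mel)            # log-mel -> (2, T * 256) waveform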
| [
"[email protected]"
]
| |
c72d9299bc10665a4db3242dbdca70d84cf13520 | 68ea05d0d276441cb2d1e39c620d5991e0211b94 | /2714.py | c816933a2eed56ec8282d45061a5d42bbd7766f2 | []
| no_license | mcavalca/uri-python | 286bc43aa157d3a6880dc222e0136c80cf079565 | e22875d2609fe7e215f9f3ed3ca73a1bc2cf67be | refs/heads/master | 2021-11-23T08:35:17.614443 | 2021-10-05T13:26:03 | 2021-10-05T13:26:03 | 131,339,175 | 50 | 27 | null | 2021-11-22T12:21:59 | 2018-04-27T19:54:09 | Python | UTF-8 | Python | false | false | 221 | py | n = int(input())
while n > 0:
n -= 1
ra = input()
saida = 'INVALID DATA'
if len(ra) == 20:
if ra[0:2] == 'RA':
if ra[2:].isdigit():
saida = int(ra[2:])
print(saida)
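# Example: "RA123456789012345678" (20 chars: "RA" + 18 digits) prints
# 123456789012345678; any other length, prefix, or suffix prints INVALID DATA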
| [
"[email protected]"
]
| |
c22cd593f5f83ae3732d104ca10c62e681b4363f | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_159/609.py | 1d4cb7959af1112bc540d578dcf82f9dfd5fc3ae | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | f = open('A-large.in')
#f = open('test.in')
count = int(f.readline())
output = ''
for x in xrange(1, count + 1):
platesCount = int(f.readline())
arr = f.readline().split()
case1 = 0
case2 = 0
case2MaxGap = 0
for i in xrange(0, platesCount - 1):
curPlate = int(arr[i])
nextPlate = int(arr[i+1])
gap = curPlate - nextPlate
case2MaxGap = max(case2MaxGap, gap)
if gap > 0:
case1 += gap
for j in xrange(0, platesCount - 1):
curPlate = int(arr[j])
if curPlate < case2MaxGap:
case2 += curPlate
else:
case2 += case2MaxGap
output += 'Case #' + str(x) + ': ' + str(case1) + ' ' + str(case2) + '\n'
print(output)
newf = open('output.txt','w')
newf.write(output)
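# case1 totals every drop between consecutive readings (rate may vary freely);
# case2 assumes one constant rate, the largest observed drop, and caps each
# interval's consumption at the plate's current value.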
| [
"[email protected]"
]
| |
4d71975bc09e3c0a6e6ee256fd6840bf15111f68 | cf0ab8503d4d704045070deea1e2125375711e86 | /apps/sockets/tests/test_importer.py | d4c7afc7997e84b3c918c6e4dc2634f31084008f | []
| no_license | faierbol/syncano-platform | c3c6468600115752fd9fa5e46a0ad59f75f6bc9c | 879111874d1ef70418b4890cf970720b0a2be4d8 | refs/heads/master | 2023-07-20T10:13:40.066127 | 2021-02-08T15:01:13 | 2021-02-08T15:01:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,405 | py | # coding=UTF8
from unittest import mock
from django.test import TestCase
from django.utils import timezone
from apps.sockets.exceptions import ObjectProcessingError, SocketConfigValidationError, SocketMissingFile
from apps.sockets.importer import INTERVAL_REGEX, SocketImporter
from apps.sockets.models import Socket
from apps.sockets.validators import CustomSocketConfigValidator
@mock.patch('apps.sockets.signal_handlers.SocketProcessorTask', mock.MagicMock())
@mock.patch('apps.sockets.download_utils.ZipDownloadFileHandler.get_socket_spec')
class TestSocketImporter(TestCase):
importer_class = SocketImporter
@mock.patch('apps.sockets.download_utils.ZipDownloadFileHandler.read_file',
mock.Mock(side_effect=SocketMissingFile('error')))
def process_socket(self, download_mock, socket_source, **kwargs):
socket = Socket(created_at=timezone.now(), **kwargs)
download_mock.return_value = socket_source
return socket, self.importer_class(socket).process()
def assert_validation(self, download_mock, error_msg, socket_source, line=None):
with self.assertRaisesMessage(ObjectProcessingError, error_msg) as cm:
self.process_socket(download_mock, socket_source)
if line is not None:
self.assertEqual(cm.exception.lineno, line,
'Lines not equal for: "{}"; Expected: {}, got: {}.'.format(str(cm.exception),
line, cm.exception.lineno))
def assert_validation_with_config(self, download_mock, error_msg, socket_source, config=None):
with self.assertRaisesMessage(SocketConfigValidationError, error_msg):
socket, _ = self.process_socket(download_mock, socket_source, config=config or {})
CustomSocketConfigValidator().validate(socket_config=socket.config,
meta_config=socket.metadata.get('config') or {})
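    # the two helpers above run the importer against inline YAML socket specs;
    # assert_validation also pins the YAML line number reported with the error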
def test_serializer_validation(self, download_mock):
self.assert_validation(download_mock, 'No calls defined',
"""
endpoints:
my_endpoint_#1:
script: script_endpoint_1
""", line=3)
def test_basic_validation(self, download_mock):
self.assert_validation(download_mock, 'Too many properties',
'\n'.join(['name{}: name'.format(i)
for i in range(self.importer_class.max_number_of_keys + 1)]))
self.assert_validation(download_mock, 'Wrong format',
'- wrong format')
def test_endpoints_validation(self, download_mock):
self.assert_validation(download_mock, 'No calls defined',
"""
endpoints:
endpoint1: {}
""", line=3)
def test_cache_validation(self, download_mock):
self.assert_validation(download_mock, 'Invalid cache value',
"""
endpoints:
endpoint1:
cache: 100000
source: |
print 1
""", line=3)
def test_timeout_validation(self, download_mock):
self.assert_validation(download_mock, 'Invalid timeout value',
"""
endpoints:
endpoint1:
timeout: 100000
source: |
print 1
""", line=3)
def test_script_endpoints_format_validation(self, download_mock):
self.assert_validation(download_mock, 'Wrong format',
"""
endpoints:
- endpoint1
""", line=3)
self.assert_validation(download_mock, 'Wrong format',
"""
endpoints:
endpoint1:
- script
""", line=4)
self.assert_validation(download_mock, 'Wrong format',
"""
endpoints:
endpoint1:
file:
- script.py
""", line=5)
self.assert_validation(download_mock, 'Source file path contains invalid characters',
"""
endpoints:
endpoint1:
file: <script.py
""", line=3)
self.assert_validation(download_mock, 'Source file path is too long',
"""
endpoints:
endpoint1:
file: {}
""".format('a' * 500), line=3)
self.assert_validation(download_mock, 'Wrong format',
"""
endpoints:
endpoint1:
POST:
- script
""", line=5)
def test_channel_endpoints_format_validation(self, download_mock):
self.assert_validation(download_mock, 'Wrong format',
"""
endpoints:
endpoint1:
channel:
- script
""", line=5)
self.assert_validation(download_mock, 'Wrong format',
"""
endpoints:
endpoint1:
channel: something.{a!bc}.{user}
""", line=4)
self.process_socket(download_mock, """
endpoints:
endpoint1:
channel: something.{ABC}.{user}
""")
self.process_socket(download_mock, """
endpoints:
endpoint1: |
channels.publish("a")
""")
def test_config_validation(self, download_mock):
self.assert_validation_with_config(
download_mock,
'Error validating socket config. "user_key" is required.',
"""
config:
secret_key:
value: some value
user_key:
required: true
value: some value
""")
for socket_yml in (
"""
config:
key: null
""",
"""
config:
- value
"""):
self.assert_validation_with_config(
download_mock,
'Error validating socket config. Wrong format.',
socket_yml)
def test_event_handlers_validation(self, download_mock):
self.assert_validation(download_mock, 'Wrong format',
"""
event_handlers:
- eh
""", line=3)
self.assert_validation(download_mock, 'Wrong format',
"""
event_handlers:
data.user.create:
- src
""", line=4)
self.assert_validation(download_mock, 'Unsupported event handler type',
"""
event_handlers:
something.bla.bla: |
print 1
""", line=3)
def test_data_event_handlers_validation(self, download_mock):
self.assert_validation(download_mock, 'Wrong format for data event handler',
"""
event_handlers:
data.usercreate: |
print 1
""", line=3)
def test_schedule_event_handlers_validation(self, download_mock):
self.assert_validation(download_mock, 'Wrong format for schedule event handler',
"""
event_handlers:
schedule.interval#5_minutes: |
print 1
""", line=3)
self.assert_validation(download_mock, 'Wrong format for schedule interval',
"""
event_handlers:
schedule.interval.5_zonks: |
print 1
""", line=3)
self.assert_validation(download_mock, 'Wrong type of schedule event handler',
"""
event_handlers:
schedule.intercal.5_minutes: |
print 1
""", line=3)
def test_custom_event_handlers_validation(self, download_mock):
self.assert_validation(download_mock, 'Wrong format for event handler',
"""
event_handlers:
events: |
print 1
""", line=3)
self.assert_validation(download_mock, 'Wrong format for event handler',
"""
event_handlers:
events.socket1.event2.suffix: |
print 1
""", line=3)
class TestSocketEventHandler(TestCase):
def calculate_interval(self, interval_str):
match = INTERVAL_REGEX.match(interval_str)
if not match:
return None
interval_dict = match.groupdict(0)
return int(interval_dict['hours']) * 60 * 60 + int(interval_dict['minutes']) * 60 + \
int(interval_dict['seconds'])
def test_schedule_interval_regex(self):
for interval_str, value in (
('5h', 5 * 60 * 60),
('5m', 5 * 60),
('5s', 5),
('5_hours_10_minutes_30_seconds', 5 * 60 * 60 + 10 * 60 + 30),
('1_hour_1_minute_1_second', 1 * 60 * 60 + 1 * 60 + 1),
('1h_2m_3s', 1 * 60 * 60 + 2 * 60 + 3),
('1h_2m_3s', 1 * 60 * 60 + 2 * 60 + 3),
('3s_2m', None),
('2m_1h', None),
('1_hor', None),
):
self.assertEqual(self.calculate_interval(interval_str), value)
| [
"[email protected]"
]
| |
0f3ce92a2ff9742a1df0452ef3c71ce7e361bd2b | f8ad6963bfc851657ea50c6a036cfad29cdd7f60 | /Books/LearningTensorFlow/Chapter5_Text_Sequence_Tensorboard/scan_example.py | 4cf7e1f4fa42316220ed1621d22dc6ddfdcbd77a | []
| no_license | foru120/PythonRepository | e1ab0265c0f50ef2e9acdf7447237c913560692b | db6b6be0f9fb91b0a81a3b6a2ec5631daab10f98 | refs/heads/master | 2021-01-01T06:53:11.728109 | 2019-04-25T13:52:50 | 2019-04-25T13:52:50 | 97,541,222 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import numpy as np
import tensorflow as tf
elems = np.array(['T', 'e', 'n', 's', 'o', 'r', ' ', 'F', 'l', 'o', 'w'])
scan_sum = tf.scan(lambda a, x: a + x, elems)
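# tf.scan folds the lambda over elems, so scan_sum holds the running
# concatenations 'T', 'Te', ..., ending with the full string 'Tensor Flow'.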
sess = tf.InteractiveSession()
print(sess.run(scan_sum))
sess.close() | [
"[email protected]"
]
| |
e9880252c3b8871fcba2e2f278da14e2c40131dc | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v10/services/services/custom_conversion_goal_service/client.py | 6a590a9ac28792be1d45d85fcb1db11c168b6b0e | [
"Apache-2.0"
]
| permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 21,247 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v10.services.types import (
custom_conversion_goal_service,
)
from .transports.base import (
CustomConversionGoalServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import CustomConversionGoalServiceGrpcTransport
class CustomConversionGoalServiceClientMeta(type):
"""Metaclass for the CustomConversionGoalService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[CustomConversionGoalServiceTransport]]
_transport_registry["grpc"] = CustomConversionGoalServiceGrpcTransport
def get_transport_class(
cls,
label: str = None,
) -> Type[CustomConversionGoalServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class CustomConversionGoalServiceClient(
metaclass=CustomConversionGoalServiceClientMeta
):
"""Service to manage custom conversion goal."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomConversionGoalServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
CustomConversionGoalServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> CustomConversionGoalServiceTransport:
"""Returns the transport used by the client instance.
Returns:
CustomConversionGoalServiceTransport: The transport used by the client
instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def conversion_action_path(
customer_id: str,
conversion_action_id: str,
) -> str:
"""Returns a fully-qualified conversion_action string."""
return "customers/{customer_id}/conversionActions/{conversion_action_id}".format(
customer_id=customer_id,
conversion_action_id=conversion_action_id,
)
@staticmethod
def parse_conversion_action_path(path: str) -> Dict[str, str]:
"""Parses a conversion_action path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/conversionActions/(?P<conversion_action_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def custom_conversion_goal_path(
customer_id: str,
goal_id: str,
) -> str:
"""Returns a fully-qualified custom_conversion_goal string."""
return "customers/{customer_id}/customConversionGoals/{goal_id}".format(
customer_id=customer_id,
goal_id=goal_id,
)
@staticmethod
def parse_custom_conversion_goal_path(path: str) -> Dict[str, str]:
"""Parses a custom_conversion_goal path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/customConversionGoals/(?P<goal_id>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(
billing_account: str,
) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(
folder: str,
) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(
folder=folder,
)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(
organization: str,
) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(
organization=organization,
)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(
project: str,
) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(
project=project,
)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(
project: str,
location: str,
) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project,
location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path
)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[
str, CustomConversionGoalServiceTransport, None
] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the custom conversion goal service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, CustomConversionGoalServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
if os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") not in (
"true",
"false",
):
raise ValueError(
"Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
)
use_client_cert = (
os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") == "true"
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
api_endpoint = (
self.DEFAULT_MTLS_ENDPOINT
if is_mtls
else self.DEFAULT_ENDPOINT
)
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, CustomConversionGoalServiceTransport):
# transport is a CustomConversionGoalServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
always_use_jwt_access=True,
)
def mutate_custom_conversion_goals(
self,
request: Union[
custom_conversion_goal_service.MutateCustomConversionGoalsRequest,
dict,
] = None,
*,
customer_id: str = None,
operations: Sequence[
custom_conversion_goal_service.CustomConversionGoalOperation
] = None,
retry: OptionalRetry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> custom_conversion_goal_service.MutateCustomConversionGoalsResponse:
r"""Creates, updates or removes custom conversion goals.
Operation statuses are returned.
Args:
request (Union[google.ads.googleads.v10.services.types.MutateCustomConversionGoalsRequest, dict]):
The request object. Request message for
[CustomConversionGoalService.MutateCustomConversionGoals][google.ads.googleads.v10.services.CustomConversionGoalService.MutateCustomConversionGoals].
customer_id (str):
Required. The ID of the customer
whose custom conversion goals are being
modified.
This corresponds to the ``customer_id`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
operations (Sequence[google.ads.googleads.v10.services.types.CustomConversionGoalOperation]):
Required. The list of operations to
perform on individual custom conversion
goal.
This corresponds to the ``operations`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.ads.googleads.v10.services.types.MutateCustomConversionGoalsResponse:
Response message for a custom
conversion goal mutate.
"""
# Create or coerce a protobuf request object.
# Quick check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([customer_id, operations])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a custom_conversion_goal_service.MutateCustomConversionGoalsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(
request,
custom_conversion_goal_service.MutateCustomConversionGoalsRequest,
):
request = custom_conversion_goal_service.MutateCustomConversionGoalsRequest(
request
)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if customer_id is not None:
request.customer_id = customer_id
if operations is not None:
request.operations = operations
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.mutate_custom_conversion_goals
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata(
(("customer_id", request.customer_id),)
),
)
# Send the request.
response = rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CustomConversionGoalServiceClient",)
| [
"[email protected]"
]
| |
5184f3b5a7ed92581d910e979f58f31d1a589646 | cad83699bb72bc1d560815d5b1c34fc9b371f163 | /cartridge/project_template/manage.py | 4330dc2e1a9c13927f25c69c19985527dbbe4267 | [
"BSD-3-Clause"
]
| permissive | BeUnique/cartridge | a1903ecc78029a576e57c3832b16357c7661b1b8 | e5b887fed96d01ab93237f345fc420bcbe56d027 | refs/heads/master | 2021-01-19T06:00:01.090292 | 2011-08-31T01:04:33 | 2011-08-31T01:04:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,284 | py | #!/usr/bin/env python
# When project_template is used as the actual project during Mezzanine
# development, insert the development path into sys.path so that the
# development version of Mezzanine is used rather than the installed version.
import os
import sys
project_path = os.path.dirname(os.path.abspath(__file__))
project_dir = project_path.split(os.sep)[-1]
if project_dir == "project_template":
dev_path = os.path.abspath(os.path.join(project_path, "..", ".."))
if dev_path not in sys.path:
sys.path.insert(0, dev_path)
import cartridge
cartridge_path = os.path.dirname(os.path.abspath(cartridge.__file__))
assert os.path.abspath(os.path.join(cartridge_path, "..")) == dev_path
from django.core.management import execute_manager
try:
import settings # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write("Error: Can't find the file 'settings.py' in the "
"directory containing %r. It appears you've customized things.\n"
"You'll have to run django-admin.py, passing it your settings module.\n"
"(If the file settings.py does indeed exist, it's causing an "
"ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
execute_manager(settings)
| [
"[email protected]"
]
| |
5278d0ebc39489eb80a4b0a82ecaf609f72027a7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03813/s431513228.py | c4298e11e8a394fafce0121831f9fbfa51e6a6ab | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 260 | py |
def read_int():
return int(input().strip())
def read_ints():
return list(map(int, input().strip().split(' ')))
def solve():
x = read_int()
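    # AtCoder rating threshold: below 1200 competes in ABC, otherwise ARC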
if x < 1200:
return 'ABC'
return 'ARC'
if __name__ == '__main__':
print(solve())
| [
"[email protected]"
]
| |
b9aae4c3e88a7792acd442dd2d9d158dd3d47ae4 | fb8cbebdf034b2f478943752d5443afc82c6eef5 | /tuirer/venv/lib/python3.6/site-packages/IPython/core/inputtransformer.py | 44ec5a1aae19b8708a617982258ffa6ec5f11bd0 | []
| no_license | fariasjr/CitiTuirer | f64e0ec93ef088f8140bb0961d2ad4ed3b59448a | deb3f7a9c2d45b8a7f54639037f097b99abdac11 | refs/heads/master | 2020-03-24T05:10:36.261050 | 2018-08-01T20:24:30 | 2018-08-01T20:24:30 | 142,477,521 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,242 | py | """Input transformer classes to support IPython special syntax.
This includes the machinery to recognise and transform ``%magic`` commands,
``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
"""
import abc
import functools
import re
from io import StringIO
from IPython.core.splitinput import LineInfo
from IPython.utils import tokenize2
from IPython.utils.tokenize2 import TokenError, generate_tokens, untokenize
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.
ESC_SHELL = '!' # Send line to underlying system shell
ESC_SH_CAP = '!!' # Send line to system shell and capture output
ESC_HELP = '?' # Find information about object
ESC_HELP2 = '??' # Find extra-detailed information about object
ESC_MAGIC = '%' # Call magic function
ESC_MAGIC2 = '%%' # Call cell-magic function
ESC_QUOTE = ',' # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';' # Quote all args as a single string, call
ESC_PAREN = '/' # Call first argument with rest of line as arguments
ESC_SEQUENCES = [ESC_SHELL, ESC_SH_CAP, ESC_HELP ,\
ESC_HELP2, ESC_MAGIC, ESC_MAGIC2,\
ESC_QUOTE, ESC_QUOTE2, ESC_PAREN ]
class InputTransformer(metaclass=abc.ABCMeta):
"""Abstract base class for line-based input transformers."""
@abc.abstractmethod
def push(self, line):
"""Send a line of input to the transformer, returning the transformed
input or None if the transformer is waiting for more input.
Must be overridden by subclasses.
Implementations may raise ``SyntaxError`` if the input is invalid. No
other exceptions may be raised.
"""
pass
@abc.abstractmethod
def reset(self):
"""Return, transformed any lines that the transformer has accumulated,
and reset its internal state.
Must be overridden by subclasses.
"""
pass
@classmethod
def wrap(cls, func):
"""Can be used by subclasses as a decorator, to return a factory that
will allow instantiation with the decorated object.
"""
@functools.wraps(func)
def transformer_factory(**kwargs):
return cls(func, **kwargs)
return transformer_factory
class StatelessInputTransformer(InputTransformer):
"""Wrapper for a stateless input transformer implemented as a function."""
def __init__(self, func):
self.func = func
def __repr__(self):
return "StatelessInputTransformer(func={0!r})".format(self.func)
def push(self, line):
"""Send a line of input to the transformer, returning the
transformed input."""
return self.func(line)
def reset(self):
"""No-op - exists for compatibility."""
pass
class CoroutineInputTransformer(InputTransformer):
"""Wrapper for an input transformer implemented as a coroutine."""
def __init__(self, coro, **kwargs):
# Prime it
self.coro = coro(**kwargs)
next(self.coro)
def __repr__(self):
return "CoroutineInputTransformer(coro={0!r})".format(self.coro)
def push(self, line):
"""Send a line of input to the transformer, returning the
transformed input or None if the transformer is waiting for more
input.
"""
return self.coro.send(line)
def reset(self):
"""Return, transformed any lines that the transformer has
accumulated, and reset its internal state.
"""
return self.coro.send(None)
class TokenInputTransformer(InputTransformer):
"""Wrapper for a token-based input transformer.
func should accept a list of tokens (5-tuples, see tokenize docs), and
return an iterable which can be passed to tokenize.untokenize().
"""
def __init__(self, func):
self.func = func
self.buf = []
self.reset_tokenizer()
def reset_tokenizer(self):
it = iter(self.buf)
self.tokenizer = generate_tokens(it.__next__)
def push(self, line):
self.buf.append(line + '\n')
if all(l.isspace() for l in self.buf):
return self.reset()
tokens = []
stop_at_NL = False
try:
for intok in self.tokenizer:
tokens.append(intok)
t = intok[0]
if t == tokenize2.NEWLINE or (stop_at_NL and t == tokenize2.NL):
# Stop before we try to pull a line we don't have yet
break
elif t == tokenize2.ERRORTOKEN:
stop_at_NL = True
except TokenError:
# Multi-line statement - stop and try again with the next line
self.reset_tokenizer()
return None
return self.output(tokens)
def output(self, tokens):
self.buf.clear()
self.reset_tokenizer()
return untokenize(self.func(tokens)).rstrip('\n')
def reset(self):
l = ''.join(self.buf)
self.buf.clear()
self.reset_tokenizer()
if l:
return l.rstrip('\n')
class assemble_python_lines(TokenInputTransformer):
def __init__(self):
super(assemble_python_lines, self).__init__(None)
def output(self, tokens):
return self.reset()
@CoroutineInputTransformer.wrap
def assemble_logical_lines():
"""Join lines following explicit line continuations (\)"""
line = ''
while True:
line = (yield line)
if not line or line.isspace():
continue
parts = []
while line is not None:
if line.endswith('\\') and (not has_comment(line)):
parts.append(line[:-1])
line = (yield None) # Get another line
else:
parts.append(line)
break
# Output
line = ''.join(parts)
# Utilities
def _make_help_call(target, esc, lspace, next_input=None):
"""Prepares a pinfo(2)/psearch call from a target name and the escape
(i.e. ? or ??)"""
method = 'pinfo2' if esc == '??' \
else 'psearch' if '*' in target \
else 'pinfo'
arg = " ".join([method, target])
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = arg.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
if next_input is None:
return '%sget_ipython().run_line_magic(%r, %r)' % (lspace, t_magic_name, t_magic_arg_s)
else:
return '%sget_ipython().set_next_input(%r);get_ipython().run_line_magic(%r, %r)' % \
(lspace, next_input, t_magic_name, t_magic_arg_s)
# These define the transformations for the different escape characters.
def _tr_system(line_info):
"Translate lines escaped with: !"
cmd = line_info.line.lstrip().lstrip(ESC_SHELL)
return '%sget_ipython().system(%r)' % (line_info.pre, cmd)
def _tr_system2(line_info):
"Translate lines escaped with: !!"
cmd = line_info.line.lstrip()[2:]
return '%sget_ipython().getoutput(%r)' % (line_info.pre, cmd)
def _tr_help(line_info):
"Translate lines escaped with: ?/??"
# A naked help line should just fire the intro help screen
if not line_info.line[1:]:
return 'get_ipython().show_usage()'
return _make_help_call(line_info.ifun, line_info.esc, line_info.pre)
def _tr_magic(line_info):
"Translate lines escaped with: %"
tpl = '%sget_ipython().run_line_magic(%r, %r)'
if line_info.line.startswith(ESC_MAGIC2):
return line_info.line
cmd = ' '.join([line_info.ifun, line_info.the_rest]).strip()
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
t_magic_name, _, t_magic_arg_s = cmd.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
return tpl % (line_info.pre, t_magic_name, t_magic_arg_s)
def _tr_quote(line_info):
"Translate lines escaped with: ,"
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
'", "'.join(line_info.the_rest.split()) )
def _tr_quote2(line_info):
"Translate lines escaped with: ;"
return '%s%s("%s")' % (line_info.pre, line_info.ifun,
line_info.the_rest)
def _tr_paren(line_info):
"Translate lines escaped with: /"
return '%s%s(%s)' % (line_info.pre, line_info.ifun,
", ".join(line_info.the_rest.split()))
tr = { ESC_SHELL : _tr_system,
ESC_SH_CAP : _tr_system2,
ESC_HELP : _tr_help,
ESC_HELP2 : _tr_help,
ESC_MAGIC : _tr_magic,
ESC_QUOTE : _tr_quote,
ESC_QUOTE2 : _tr_quote2,
ESC_PAREN : _tr_paren }
@StatelessInputTransformer.wrap
def escaped_commands(line):
"""Transform escaped commands - %magic, !system, ?help + various autocalls.
"""
if not line or line.isspace():
return line
lineinf = LineInfo(line)
if lineinf.esc not in tr:
return line
return tr[lineinf.esc](lineinf)
_initial_space_re = re.compile(r'\s*')
_help_end_re = re.compile(r"""(%{0,2}
[a-zA-Z_*][\w*]* # Variable name
(\.[a-zA-Z_*][\w*]*)* # .etc.etc
)
(\?\??)$ # ? or ??
""",
re.VERBOSE)
# Extra pseudotokens for multiline strings and data structures
_MULTILINE_STRING = object()
_MULTILINE_STRUCTURE = object()
def _line_tokens(line):
"""Helper for has_comment and ends_in_comment_or_string."""
readline = StringIO(line).readline
toktypes = set()
try:
for t in generate_tokens(readline):
toktypes.add(t[0])
except TokenError as e:
# There are only two cases where a TokenError is raised.
if 'multi-line string' in e.args[0]:
toktypes.add(_MULTILINE_STRING)
else:
toktypes.add(_MULTILINE_STRUCTURE)
return toktypes
def has_comment(src):
"""Indicate whether an input line has (i.e. ends in, or is) a comment.
This uses tokenize, so it can distinguish comments from # inside strings.
Parameters
----------
src : string
A single line input string.
Returns
-------
comment : bool
True if source has a comment.
"""
return (tokenize2.COMMENT in _line_tokens(src))
def ends_in_comment_or_string(src):
"""Indicates whether or not an input line ends in a comment or within
a multiline string.
Parameters
----------
src : string
A single line input string.
Returns
-------
comment : bool
True if source ends in a comment or multiline string.
"""
toktypes = _line_tokens(src)
return (tokenize2.COMMENT in toktypes) or (_MULTILINE_STRING in toktypes)
@StatelessInputTransformer.wrap
def help_end(line):
"""Translate lines with ?/?? at the end"""
m = _help_end_re.search(line)
if m is None or ends_in_comment_or_string(line):
return line
target = m.group(1)
esc = m.group(3)
lspace = _initial_space_re.match(line).group(0)
# If we're mid-command, put it back on the next prompt for the user.
next_input = line.rstrip('?') if line.strip() != m.group(0) else None
return _make_help_call(target, esc, lspace, next_input)
@CoroutineInputTransformer.wrap
def cellmagic(end_on_blank_line=False):
"""Captures & transforms cell magics.
After a cell magic is started, this stores up any lines it gets until it is
reset (sent None).
"""
tpl = 'get_ipython().run_cell_magic(%r, %r, %r)'
    cellmagic_help_re = re.compile(r'%%\w+\?')
line = ''
while True:
line = (yield line)
# consume leading empty lines
while not line:
line = (yield line)
if not line.startswith(ESC_MAGIC2):
# This isn't a cell magic, idle waiting for reset then start over
while line is not None:
line = (yield line)
continue
if cellmagic_help_re.match(line):
# This case will be handled by help_end
continue
first = line
body = []
line = (yield None)
while (line is not None) and \
((line.strip() != '') or not end_on_blank_line):
body.append(line)
line = (yield None)
# Output
magic_name, _, first = first.partition(' ')
magic_name = magic_name.lstrip(ESC_MAGIC2)
line = tpl % (magic_name, first, u'\n'.join(body))
def _strip_prompts(prompt_re, initial_re=None, turnoff_re=None):
"""Remove matching input prompts from a block of input.
Parameters
----------
prompt_re : regular expression
A regular expression matching any input prompt (including continuation)
initial_re : regular expression, optional
A regular expression matching only the initial prompt, but not continuation.
If no initial expression is given, prompt_re will be used everywhere.
Used mainly for plain Python prompts, where the continuation prompt
``...`` is a valid Python expression in Python 3, so shouldn't be stripped.
If initial_re and prompt_re differ,
only initial_re will be tested against the first line.
If any prompt is found on the first two lines,
prompts will be stripped from the rest of the block.
"""
if initial_re is None:
initial_re = prompt_re
line = ''
while True:
line = (yield line)
# First line of cell
if line is None:
continue
out, n1 = initial_re.subn('', line, count=1)
if turnoff_re and not n1:
if turnoff_re.match(line):
# We're in e.g. a cell magic; disable this transformer for
# the rest of the cell.
while line is not None:
line = (yield line)
continue
line = (yield out)
if line is None:
continue
# check for any prompt on the second line of the cell,
# because people often copy from just after the first prompt,
# so we might not see it in the first line.
out, n2 = prompt_re.subn('', line, count=1)
line = (yield out)
if n1 or n2:
# Found a prompt in the first two lines - check for it in
# the rest of the cell as well.
while line is not None:
line = (yield prompt_re.sub('', line, count=1))
else:
# Prompts not in input - wait for reset
while line is not None:
line = (yield line)
@CoroutineInputTransformer.wrap
def classic_prompt():
"""Strip the >>>/... prompts of the Python interactive shell."""
# FIXME: non-capturing version (?:...) usable?
prompt_re = re.compile(r'^(>>>|\.\.\.)( |$)')
initial_re = re.compile(r'^>>>( |$)')
# Any %magic/!system is IPython syntax, so we needn't look for >>> prompts
turnoff_re = re.compile(r'^[%!]')
return _strip_prompts(prompt_re, initial_re, turnoff_re)
@CoroutineInputTransformer.wrap
def ipy_prompt():
"""Strip IPython's In [1]:/...: prompts."""
# FIXME: non-capturing version (?:...) usable?
prompt_re = re.compile(r'^(In \[\d+\]: |\s*\.{3,}: ?)')
# Disable prompt stripping inside cell magics
turnoff_re = re.compile(r'^%%')
return _strip_prompts(prompt_re, turnoff_re=turnoff_re)
@CoroutineInputTransformer.wrap
def leading_indent():
"""Remove leading indentation.
If the first line starts with a spaces or tabs, the same whitespace will be
removed from each following line until it is reset.
"""
space_re = re.compile(r'^[ \t]+')
line = ''
while True:
line = (yield line)
if line is None:
continue
m = space_re.match(line)
if m:
space = m.group(0)
while line is not None:
if line.startswith(space):
line = line[len(space):]
line = (yield line)
else:
# No leading spaces - wait for reset
while line is not None:
line = (yield line)
_assign_pat = \
r'''(?P<lhs>(\s*)
([\w\.]+) # Initial identifier
(\s*,\s*
\*?[\w\.]+)* # Further identifiers for unpacking
\s*?,? # Trailing comma
)
\s*=\s*
'''
assign_system_re = re.compile(r'{}!\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_system_template = '%s = get_ipython().getoutput(%r)'
@StatelessInputTransformer.wrap
def assign_from_system(line):
"""Transform assignment from system commands (e.g. files = !ls)"""
m = assign_system_re.match(line)
if m is None:
return line
return assign_system_template % m.group('lhs', 'cmd')
assign_magic_re = re.compile(r'{}%\s*(?P<cmd>.*)'.format(_assign_pat), re.VERBOSE)
assign_magic_template = '%s = get_ipython().run_line_magic(%r, %r)'
@StatelessInputTransformer.wrap
def assign_from_magic(line):
"""Transform assignment from magic commands (e.g. a = %who_ls)"""
m = assign_magic_re.match(line)
if m is None:
return line
#Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
m_lhs, m_cmd = m.group('lhs', 'cmd')
t_magic_name, _, t_magic_arg_s = m_cmd.partition(' ')
t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
return assign_magic_template % (m_lhs, t_magic_name, t_magic_arg_s)
| [
"[email protected]"
]
| |
0e10cbdfa5b1cd030ad2bdd01ff695d9fdb60938 | d88868b88864e4d10009c58b27323034715b0277 | /projects/barter/deployments/docker/barter/test.py | 1ff68bc01c1f1c3d50a8992e741a015cb27fa14d | [
"Apache-2.0"
]
| permissive | shamal112mn/packer-1 | 795ebd9e0fee444f0cbb01897a50e199b73f1307 | be2720e9cb61bf1110a880e94e32a6767a341588 | refs/heads/master | 2023-07-10T09:33:10.516559 | 2021-08-17T02:25:50 | 2021-08-17T02:25:50 | 327,763,823 | 0 | 0 | null | 2021-01-08T01:08:49 | 2021-01-08T01:08:48 | null | UTF-8 | Python | false | false | 388 | py | import requests
import json
url = "https://investors-exchange-iex-trading.p.rapidapi.com/stock/tsla/effective-spread"
headers = {
'x-rapidapi-key': "158cd4f9cdmsh0d92f8b92b1d427p1947b6jsn857aa1252e0b",
'x-rapidapi-host': "investors-exchange-iex-trading.p.rapidapi.com"
}
response = requests.request("GET", url, headers=headers)
print(json.dumps(response.json(), indent=2)) | [
"[email protected]"
]
| |
6f5a178c8d1ba0fb6bb65c7f38002457ca8ef23a | e3565e1ce607f60745f2a045aae8026661a6b99b | /resources/Onyx-1.0.511/py/onyx/util/rocutils.py | 48dd8b474c58afe853ee45475fc9479842d375ed | [
"Apache-2.0"
]
| permissive | eternity668/speechAD | 4c08d953b2ed06b3357b1c39d8709dd088a2471c | f270a1be86372b7044615e4fd82032029e123bc1 | refs/heads/master | 2021-01-12T22:10:33.358500 | 2014-02-03T16:03:28 | 2014-02-03T16:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,676 | py | ###########################################################################
#
# File: rocutils.py (directory: ./py/onyx/util)
# Date: Mon 10 Mar 2008 18:34
# Author: Ken Basye
# Description: Utility code for generating ROC and DET curves
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2008, 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
Utilities for generating ROC and DET curves
"""
import StringIO
def _uniquify_preserving_first(iterable, eq_pred):
item = iterable.next()
while 1:
try:
next_item = iterable.next()
except:
yield item
break
if not eq_pred(item, next_item):
yield item
item = next_item
def _uniquify_preserving_last(iterable, eq_pred):
item = iterable.next()
while 1:
try:
next_item = iterable.next()
        except StopIteration:
yield item
break
if not eq_pred(item, next_item):
yield item
item = next_item
else:
item = next_item
def make_ROC_data(reference, ratios):
"""
reference is a list of 0/1 values which are the correct classifications
values is a parallel list of numeric values, with higher values intending to
map toward classifications of 1.
Returns data for a ROC curve in the form of a list of triples, where each triple
contains an interesting threshold value, the fraction of correct identifications (true positives)
as a percent, and the fraction of false positives, at that threshold. The triples are
ordered by threshold from lowest (fewest false positives) to highest (most true positives)
Note that a typical ROC curve would plot false_pos on the X axis and true_pos on the Y axis
using a linear scale.
>>> ref = [0,0,0,0,0,1,1,1,1,1]
>>> values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
>>> res = make_ROC_data(ref, values)
>>> res
[(0.0, 0.0, 9), (20.0, 80.0, 4), (80.0, 100.0, 2)]
"""
det_data = make_DET_data(reference, ratios)
roc_data = [(fp, 100-miss, t) for (fp, miss, t) in det_data]
return roc_data
def make_DET_data(reference, ratios):
"""
reference is a list of 0/1 values which are the correct
classifications values is a parallel list of numeric values, with
higher values intending to map toward classifications of 1.
Returns data for a DET curve in the form of a list of triples,
where each triple contains the fraction of false positives as a
percent, the fraction of false negatives, and the threshold value
that generated those rates. The triples are ordered by threshold
from lowest (fewest false positives) to highest (fewest misses)
Note that a typical DET curve would plot false_pos on the X axis
and false_neg on the Y axis, oftentimes with a normal deviate
scale.
>>> ref = [0,0,0,0,0,1,1,1,1,1]
>>> values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
>>> res = make_DET_data(ref, values)
>>> res
[(0.0, 100.0, 9), (20.0, 19.999999999999996, 4), (80.0, 0.0, 2)]
"""
assert( len(reference) == len(ratios) )
num_pos = reference.count(1)
num_neg = reference.count(0)
assert( num_pos + num_neg == len(reference))
full_result = []
# Find the list of interesting threshholds, which is any value in
# the list of ratios
# Seems like there should be an easier way to uniquify a list
all_threshes = set(ratios)
all_threshes = list(all_threshes)
all_threshes.sort()
def count_values_over_thresh(value, ref, ratios, t):
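        # counts reference entries labelled `value` whose ratio exceeds threshold t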
result = 0
for (i, r) in enumerate(ratios):
if ref[i] == value and r > t:
result += 1
return result
# Now find precision and recall at each threshold
for thresh in all_threshes:
num_neg_accepted = count_values_over_thresh(0, reference, ratios, thresh)
num_pos_accepted = count_values_over_thresh(1, reference, ratios, thresh)
full_result.append((100 * float(num_neg_accepted) / num_neg, # false positives
100 * (1 - float(num_pos_accepted) / num_pos), # misses
thresh))
def eq0(x,y): return x[0] == y[0]
def eq1(x,y): return x[1] == y[1]
iter1 = _uniquify_preserving_first(iter(full_result), eq0)
ret = list(_uniquify_preserving_last(iter1, eq1))
ret.reverse()
return ret
def write_data_as_csv(data, stream, header_type = "DET"):
""" Write either ROC or DET data as comma-separated text, suitable for import into
a spreadsheet or other tool. Writes DET header fields be default, use header_type
of "ROC" or None for ROC headers or no headers, respectively.
>>> ref = [0,0,0,0,0,1,1,1,1,1]
>>> values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
>>> res = make_DET_data(ref, values)
>>> s = StringIO.StringIO()
>>> write_data_as_csv(res, s)
>>> out = s.getvalue()
>>> print out
False Alarm Rate, Miss Rate, Threshold
0.0, 100.0, 9
20.0, 20.0, 4
80.0, 0.0, 2
<BLANKLINE>
>>> s.seek(0)
>>> res = make_ROC_data(ref, values)
>>> write_data_as_csv(res, s, header_type="ROC")
>>> out = s.getvalue()
>>> print out
False Pos Rate, True Pos Rate, Threshold
0.0, 0.0, 9
20.0, 80.0, 4
80.0, 100.0, 2
<BLANKLINE>
>>> s.close()
"""
if header_type == "DET":
stream.write("False Alarm Rate, Miss Rate, Threshold")
elif header_type == "ROC":
stream.write("False Pos Rate, True Pos Rate, Threshold")
[stream.write("\n%s, %s, %s" % triple) for triple in data]
stream.write("\n")
def _test0():
ref = [0,0,0,0,0,1,1,1,1,1]
values = [2, 3, 4, 9, 4, 5, 6, 9, 9, 3]
res = make_DET_data(ref, values)
s = open("foo_csv.txt", "w")
write_data_as_csv(res, s)
s.close()
if __name__ == '__main__':
from onyx import onyx_mainstartup
onyx_mainstartup()
# _test0()
| [
"[email protected]"
]
| |
4438a410880898850073b4bc83f77e73ca792121 | eadd15064aa74811e7a3718b617636627ef4fd47 | /web/migrations/0020_rename_index_indexpage.py | 8efb4605a936d00a29a46a2c95ef6c4263e63c65 | []
| no_license | topsai/plasrefine_backstage | 262f7bb032daa4d018aac1519e1139cb060c3f91 | 1eb34dd0b13ebdc2a42dd6ed1aaa2d08c18ab5fb | refs/heads/master | 2023-04-12T13:24:22.710108 | 2021-05-08T14:16:41 | 2021-05-08T14:16:41 | 361,993,024 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 326 | py | # Generated by Django 3.2 on 2021-05-02 08:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0019_auto_20210502_1558'),
]
operations = [
migrations.RenameModel(
old_name='Index',
new_name='IndexPage',
),
]
| [
"[email protected]"
]
| |
241ac7b70c2142fba7ff196677ed61d5e0910d2f | 587290dbd33c5fb60a154eacd2155e681a3f9ecf | /js/gatsby/index.cgi | db257841537410c023fb93f34bb0b3e0a10dcd00 | []
| no_license | jaredly/prog | b6408db52c16e9d3c322933f0624c23663d33ce0 | e8fe82ccd1abe42371adbb3f317576facac546ca | refs/heads/master | 2021-01-20T09:12:48.931999 | 2013-08-29T04:24:36 | 2013-08-29T04:24:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,523 | cgi | #!/usr/bin/python
print 'Content-type:text/html\n'
import cgi,cgitb,os,sys,re
cgitb.enable()
def load_chapter(chap):
return open('chapter%s.txt'%chap).read()
def load_chapters():
d=[]
for i in range(1,10):
d.append([i,load_chapter(i)])
return d
def print_entry(at,chap,pageat,item):
    # ordinal suffix; special-case 11-13, which take "th"
    if at%100 in (11,12,13):nd="<sup>th</sup>"
    elif at%10==1:nd="<sup>st</sup>"
    elif at%10==2:nd="<sup>nd</sup>"
    elif at%10==3:nd="<sup>rd</sup>"
    else:nd="<sup>th</sup>"
    return "<br><br><br><b>%s%s</b> paragraph in chapter <b>%s</b> (around page %s)<br><br>\n"%(at, nd, chap, pageat)+item
form = cgi.FieldStorage()
print """
<html><head><title>Great Gatsby Search</title></head><body>
<style>
span {
font-weight: bold;
font-size: 1.1em;
color: black;
background-color: #ccc;
}
h2 {
text-align:center;
}
div.searchform {
background-color:#BBFFAA;
border:2px solid green;
padding:15px;
position:absolute;
right:0px;
top:0px;
}
form {
margin: 0px;
}
</style>
<h1>Search the Great Gatsby</h1>
<div class="searchform">
<form method="GET">
Search For: <input name="s" value="%s"> <input type="checkbox" name="whole" value="1"> Whole word
<input type="submit" value="Search">
</form>
</div>
<br>"""%(form.has_key("s") and form["s"].value or "")
pages = [1, 23, 39, 61, 81, 97, 113, 147, 163, 180 ] ## None ## [3, 16, 26, 39, 52, 62, 93, 103]
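# 'pages' gives the starting page of each chapter; a match's page number is
# interpolated from its position within the chapter text.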
retr = ""
num = 0
if form.has_key('s'):
term = form['s'].value.strip()
iterm=term
if form.has_key('whole'):
term='(?<=\W)'+term+'(?=\W)'
for chapter,text in load_chapters():
for i,body in enumerate(text.split('\n')):
all = re.search(term,body,re.I|re.S)
if pages:
pchap = pages[chapter-1]
#print text.find(body),len(text)
pat = int(round(float(pages[chapter]-pchap)* (text.find(body)/float(len(text)))+pchap))
else:
pat = ""
rgx = re.compile(term,re.I)
bdy = rgx.sub(lambda x:'<span>'+x.group()+'</span>', body)+'<br><br>'
## bdy = re.sub(term, lambda x:'<span>'+x.group()+'</span>', body)+'<br><br>'
if all:
## print (text.find(body)/float(len(text))),float(pages[chapter]-pchap)
## print float(pages[chapter]-pchap)*(text.find(body)/float(len(text)))+pchap
retr += print_entry(i,chapter,pat,bdy)
num+=1
print "<h3>Found %d results for %s</h3>"%(num,iterm)
print retr | [
"[email protected]"
]
| |
21bc2d0fbd981fbefdd919c846357da41182c5ac | e48eac671ea8335f696ec5fd0511b12800b0fca0 | /accounts/models.py | 6625a130b79945d6d4613bcc9a0b047c32339541 | []
| no_license | linusidom/django-basic-reservation-system | 8fba10708cebd42d5ad308c0ef838a0fe1ac8778 | 5dd5db4832fe83e862424af18aae3aad6cf3f1ed | refs/heads/master | 2020-03-26T08:37:15.052362 | 2018-08-14T11:34:19 | 2018-08-14T11:34:19 | 144,711,607 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 359 | py | from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser
from django.shortcuts import reverse
class Profile(AbstractUser):
ideal_weight = models.IntegerField(default=185)
def __str__(self):
return self.email
def get_absolute_url(self):
return reverse('accounts:profile_detail', kwargs={'pk':pk})
| [
"[email protected]"
]
| |
f8fdac6b1a2846a9f74f7db1f038fed9022ab0a4 | 5dd03f9bd8886f02315c254eb2569e4b6d368849 | /3rdparty/python/GitPython-0.3.1-py2.6.egg/git/__init__.py | 500d053f7729d7172f300870e30b00ae7a523f09 | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
]
| permissive | adamsxu/commons | 9e1bff8be131f5b802d3aadc9916d5f3a760166c | 9fd5a4ab142295692994b012a2a2ef3935d35c0b | refs/heads/master | 2021-01-17T23:13:51.478337 | 2012-03-11T17:30:24 | 2012-03-11T17:30:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,171 | py | # __init__.py
# Copyright (C) 2008, 2009 Michael Trier ([email protected]) and contributors
#
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
import sys
import inspect
__version__ = '0.3.1'
#{ Initialization
def _init_externals():
"""Initialize external projects by putting them into the path"""
sys.path.append(os.path.join(os.path.dirname(__file__), 'ext', 'gitdb'))
try:
import gitdb
except ImportError:
raise ImportError("'gitdb' could not be found in your PYTHONPATH")
#END verify import
#} END initialization
#################
_init_externals()
#################
#{ Imports
from git.config import GitConfigParser
from git.objects import *
from git.refs import *
from git.diff import *
from git.exc import *
from git.db import *
from git.cmd import Git
from git.repo import Repo
from git.remote import *
from git.index import *
from git.util import (
LockFile,
BlockingLockFile,
Stats,
Actor
)
#} END imports
__all__ = [ name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)) ]
| [
"[email protected]"
]
| |
996aac45cc3fff5b7b5a9eb0567f864fdb8f7981 | 8c4af05e0257661195c95b0b9e0873eeb6391dab | /packages/python-packages/apiview-gpt/src/_models.py | 85d435a1aa1dd9ca713a5cccc403df55f16ebc0d | [
"MIT",
"LicenseRef-scancode-generic-cla"
]
| permissive | Azure/azure-sdk-tools | 6d171054800807fcbe7b8b878c5903a202d31faa | 2dce521dedc3f5169007d4c481ae8ec077be4450 | refs/heads/main | 2023-09-01T00:00:32.662190 | 2023-08-31T22:21:44 | 2023-08-31T22:21:44 | 170,592,186 | 113 | 174 | MIT | 2023-09-14T21:53:41 | 2019-02-13T22:55:46 | C# | UTF-8 | Python | false | false | 744 | py | from pydantic import BaseModel, Field
from typing import List, Optional
class Violation(BaseModel):
rule_ids: List[str] = Field(description="unique rule ID or IDs that were violated.")
line_no: Optional[int] = Field(description="the line number of the violation.")
bad_code: str = Field(description="the original code that was bad, cited verbatim.")
suggestion: str = Field(description="the suggested fix for the bad code.")
comment: str = Field(description="a comment about the violation.")
class GuidelinesResult(BaseModel):
status: str = Field(description="Succeeded if the request has no violations. Error if there are violations.")
violations: List[Violation] = Field(description="list of violations if any")
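# Illustrative usage (hypothetical payload), e.g. when parsing a model response:
#   GuidelinesResult.parse_obj({
#       "status": "Error",
#       "violations": [{"rule_ids": ["example-rule"], "line_no": 10,
#                       "bad_code": "def Foo(): ...", "suggestion": "def foo(): ...",
#                       "comment": "function names should be snake_case"}],
#   })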
| [
"[email protected]"
]
| |
ad74fb1924b3b8c48c2d09b0bbc03cd105920500 | af97a81a69d100e684b8e9f02bc1b117069b9895 | /Testcases_Copilot/CWE-730/codeql_2.py | 1c59968d10ace977410229a13e67c6c9188734a4 | []
| no_license | s2e-lab/SecurityEval | 8de3e2f78fffa7058d79cce84fbcd8ff29c8468d | f637f67b8e8d52d641ab15903e46a41ead7e30a5 | refs/heads/main | 2023-06-17T22:55:08.047769 | 2023-06-16T16:54:54 | 2023-06-16T16:54:54 | 509,664,226 | 28 | 7 | null | 2023-05-27T04:55:05 | 2022-07-02T05:10:17 | Python | UTF-8 | Python | false | false | 294 | py | from flask import request
import re
@app.route("/direct")
def direct():
'''
get the regular expression pattern from the get request, comipile it and then use it for search
'''
pattern = request.args.get('pattern')
regex = re.compile(pattern)
return regex.search(text)
| [
"[email protected]"
]
| |
aeff8452dda485b474d6f78dec9db2f3f258e6ff | 80d1f1f50b9f7f6be17302df7695154f54aa7a76 | /pdp8/core.py | 3691cb7deeb3ab2b53a7ab5d3ba6877bf6d1f838 | [
"MIT"
]
| permissive | romilly/pdp8 | af7f7df0136b31df03a4f53b709869a59f25ca2c | 68e1025c5e4c6b6fa92a62cc81117d841c214137 | refs/heads/master | 2020-03-13T15:24:12.955602 | 2019-06-19T05:58:50 | 2019-06-19T05:58:50 | 131,176,107 | 4 | 1 | null | 2018-05-16T07:13:14 | 2018-04-26T15:31:01 | HTML | UTF-8 | Python | false | false | 7,983 | py | from io import StringIO
from pdp8.tracing import NullTracer
def octal(string):
return int(string, 8)
OPR_GROUP1 = octal('0400')
OPR_GROUP2 = octal('0001')
CLA1 = octal('0200')
CLL = octal('0100')
CMA = octal('0040')
CML = octal('0020')
RAR = octal('0010')
RAL = octal('0004')
RTR = octal('0012')
RTL = octal('0006')
IAC = octal('0001')
HALT = octal('0002')
BIT8 = octal('0010')
Z_BIT = octal('0200')
I_BIT = octal('0400')
class PDP8:
# TODO simplify these, use constants rather than calculating?
W_BITS = 12 # number of bits in a word
W_MASK = 2 ** W_BITS - 1 # word mask
OP_BITS = 3 # 3 bits in the opcode
V_BITS = 7 # 7 bits for the value part of an instruction
OP_MASK = (2 ** OP_BITS - 1) << W_BITS - OP_BITS
V_MASK = 2 ** V_BITS - 1 # mask for instruction data
MAX = 2 ** (V_BITS - 1)
def __init__(self):
self.memory = 2 ** self.W_BITS * [0]
self.pc = 0
self.accumulator = 0
self.link = 0
self.running = False
self.debugging = False
self.stepping = False
self.ia = None
self.instruction = None
self.tape = StringIO('')
self.READER1 = 0o03
self.PUNCH1 = 0o04
self.punchflag = 0
self.output = ''
self.tracer = None
self.ops = [self.andi,
self.tad,
self.isz,
self.dca,
self.jms,
self.jmp,
self.iot,
self.opr]
def __getitem__(self, address):
return self.memory[address] & self.W_MASK # only 12 bits retrieved
def is_group1(self):
return 0 == self.i_mask(OPR_GROUP1)
def i_mask(self, mask):
return self.instruction & mask
def is_iac(self):
return 0 != self.i_mask(IAC)
def is_group2(self):
return (not self.is_group1()) and 0 == self.i_mask(OPR_GROUP2)
# Group 2
def is_halt(self):
return self.i_mask(HALT)
def __setitem__(self, address, contents):
self.memory[address] = contents & self.W_MASK # only 12 bits stored
if self.debugging:
self.tracer.setting(address, contents)
def run(self, debugging=False, start=None, tape='', stepping=None, tracer=None):
self.running = True
if tracer is not None:
self.tracer = tracer
else:
if self.tracer is None:
self.tracer = NullTracer()
if start:
self.pc = start
# TODO: smarter tape creation to cope with text and binary tapes.
self.tape = StringIO(tape)
if stepping is not None:
self.stepping = stepping
self.debugging = debugging
while self.running:
self.execute()
if self.stepping:
self.running = False
def execute(self):
old_pc = self.pc # for debugging
self.instruction = self[self.pc]
self.ia = self.instruction_address()
op = self.opcode()
self.pc += 1
self.ops[op]()
if self.debugging:
self.tracer.instruction(old_pc, self.instruction, self.accumulator, self.link, self.pc)
def opcode(self):
bits = self.i_mask(self.OP_MASK)
code = bits >> self.W_BITS - self.OP_BITS
return code
def andi(self):
self.accumulator &= self[self.ia]
def tad(self):
self.add_12_bits(self[self.ia])
def add_12_bits(self, increment):
self.accumulator += increment
total = self.accumulator
self.accumulator &= octal('7777')
if self.accumulator == total:
self.link = 0
else:
self.link = 1
def isz(self):
contents = self[self.ia]
contents += 1
self[self.ia] = contents # forces 12-bit value
if self[self.ia] == 0:
self.pc += 1 # skip
def dca(self):
self[self.ia] = self.accumulator
self.accumulator = 0
def jmp(self):
self.pc = self.ia
def jms(self):
self[self.ia] = self.pc
self.pc = self.ia + 1
def iot(self):
device = (self.instruction & 0o0770) >> 3
io_op = self.instruction & 0o0007
if device == self.READER1:
self.reader(io_op)
elif device == self.PUNCH1:
self.punch(io_op)
else:
            raise ValueError('unknown device')
def opr(self):
if self.is_group1():
self.group1()
return
if self.is_group2():
self.group2()
return
        raise ValueError('Unknown opcode in instruction 0o%o at %d(%o)' % (self.instruction, self.pc - 1, self.pc - 1))
def instruction_address(self):
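        # If the Z bit is clear the 7-bit offset is taken on the current
        # page, otherwise on page zero; the I bit adds one level of
        # indirection.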
o = self.i_mask(self.V_MASK)
if not self.i_mask(Z_BIT):
o += self.pc & 0o7600
if self.i_mask(I_BIT):
o = self[o]
return o
def cla(self):
self.accumulator = 0
def cll(self):
self.link = 0
def cma(self):
self.accumulator ^= 0o7777
def cml(self):
self.link = 1-self.link
def rr(self):
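        # bit 1 (value 2) of the instruction selects rotate-twice (RTR),
        # otherwise rotate-once (RAR); rl() below uses the same convention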
self.rar(0 < self.i_mask(2))
def rar(self, flag):
count = 2 if flag else 1
for i in range(count):
new_link = self.accumulator & 0o0001
self.accumulator = self.accumulator >> 1
if self.link:
self.accumulator |= 0o4000
self.link = new_link
def rl(self):
self.ral(self.i_mask(2))
def ral(self, flag):
count = 2 if flag else 1
for i in range(count):
new_link = 1 if self.accumulator & 0o4000 else 0
self.accumulator = 0o7777 & self.accumulator << 1
if self.link:
self.accumulator |= 0o0001
self.link = new_link
def iac(self):
self.add_12_bits(1)
def halt(self):
if self.debugging:
print('Halted')
self.tracer.halt(self.pc)
self.running = False
def group1(self):
        for (mask, ins) in zip([CLA1, CLL, CMA, CML, IAC, RAR, RAL],
                               [self.cla, self.cll, self.cma, self.cml, self.iac, self.rr, self.rl]):
if self.i_mask(mask):
ins()
def is_or_group(self):
return not self.i_mask(BIT8)
def is_and_group(self):
return self.i_mask(BIT8)
def group2(self):
if self.is_or_group() and (self.sma() or self.sza() or self.snl()):
self.pc += 1
if self.is_and_group() and self.spa() and self.sna() and self.szl():
self.pc += 1
if self.is_cla2():
self.cla()
if self.is_halt():
self.halt()
def sma(self):
return self.accumulator_is_negative() and (self.i_mask(octal('0100')))
def accumulator_is_negative(self):
return self.accumulator & octal('4000')
def sza(self):
return self.accumulator == 0 and (self.i_mask(octal('0040')))
def snl(self):
return self.link == 1 and (self.i_mask(octal('0020')))
def spa(self):
return self.accumulator_is_positive() or not (self.i_mask(octal('0100')))
def accumulator_is_positive(self):
return not self.accumulator_is_negative()
def sna(self):
return self.accumulator != 0 or not (self.i_mask(octal('0040')))
def szl(self):
return self.link == 0 or not (self.i_mask(octal('0020')))
def reader(self, io_op):
pass
def punch(self, io_op):
if (io_op & 1) and self.punchflag:
self.pc += 1
if io_op & 2:
self.punchflag = 0
if io_op & 4:
if self.accumulator != 0:
self.output += str(chr(self.accumulator))
self.punchflag = 1
def is_cla2(self):
return self.instruction & octal('0200')
| [
"[email protected]"
]
| |
def4e0c9060cbb6946a984f723129a8064a91715 | 16679038c7a0b75097ffdd2d5b6be28ae8dae68f | /test/utilities/test_catch_exceptions.py | 74b112680efb4b09be050e92f0ab2d6cc4bcdc2b | [
"MIT"
]
| permissive | elifesciences/profiles | d98e5c2391630f9877e0585e07143d7904f1e777 | 9cd2e523f9dfa864891511e6525381f191951b24 | refs/heads/develop | 2023-08-31T03:09:08.723797 | 2023-08-25T06:54:55 | 2023-08-25T06:54:55 | 94,993,646 | 2 | 0 | MIT | 2023-06-21T01:15:37 | 2017-06-21T10:43:52 | Python | UTF-8 | Python | false | false | 959 | py | import logging
from logging import Handler, Logger, Manager
from logging.handlers import BufferingHandler
from pytest import fixture
from profiles.utilities import catch_exceptions
@fixture
def logger(handler: Handler) -> Logger:
logger = Logger('logger', logging.DEBUG)
logger.addHandler(handler)
logger.manager = Manager('root')
return logger
@fixture
def handler() -> Handler:
return BufferingHandler(100)
def test_it_catches_and_logs_exceptions(logger: Logger, handler: BufferingHandler):
@catch_exceptions(logger)
def my_function():
raise Exception('My exception')
result = my_function()
assert result is None
assert len(handler.buffer) == 1
def test_it_does_nothing_when_no_exception(logger: Logger, handler: BufferingHandler):
@catch_exceptions(logger)
def my_function():
return True
result = my_function()
assert result is True
assert len(handler.buffer) == 0
| [
"[email protected]"
]
| |
d193d711f2be24fe4204a34d2b1a3b14eda09afd | d40ab8694389d1a0d80013a2b0ecd8c426e6e8f8 | /graphs/scc.py | 5847ec8cbff74c175a28bd22a6d879601af33ceb | []
| no_license | lukebiggerstaff/Stanford-Algorithms-MOOC | b5b34c8d8ff7725461fd03bb3aac505c87a1012e | 382a30f27dff6ca9d30c071a4d3418ff6333f4c3 | refs/heads/master | 2021-01-01T17:02:26.883373 | 2017-11-14T19:06:14 | 2017-11-14T19:06:14 | 97,980,984 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,171 | py | import sys
import re
import resource
from collections import defaultdict
sys.setrecursionlimit(10 ** 6)
resource.setrlimit(resource.RLIMIT_STACK, (2 ** 29, 2 ** 30))
def dfsfirstpass(graph):
visited = set()
stack = list()
for i in graph.keys():
start = str(i)
if start in graph:
dfsfirstpassrecursive(graph, start, stack, visited)
return stack
def dfsfirstpassrecursive(graph, start, stack, visited):
if start not in visited:
visited.add(start)
if start in graph:
for edge in graph[start]:
if edge not in visited:
dfsfirstpassrecursive(graph, edge, stack, visited)
stack.append(start)
def dfssecondpass(rgraph, stack):
visited = set()
leaderlist = defaultdict(list)
while stack:
start = stack.pop()
if start not in visited:
visited.add(start)
leader = start
leaderlist[leader] += [start]
for edge in set(rgraph[start]) - visited:
dfsrecursive(rgraph, edge, visited, leaderlist, leader)
return leaderlist
def dfsrecursive(graph, start, visited, leaderlist, leader):
visited.add(start)
leaderlist[leader] += [start]
for edge in set(graph[start]) - visited:
dfsrecursive(graph, edge, visited, leaderlist, leader)
def return_top_five_scc(leaderlist):
sccsizelist = list()
for key in leaderlist.keys():
size = len(leaderlist[key])
sccsizelist.append(size)
sccsizelist.sort()
return sccsizelist[-5:]
def kosaraju(graph, rgraph):
stack = dfsfirstpass(rgraph)
#print(f'stack is {stack}')
leaderdict = dfssecondpass(graph, stack)
#print(f'graph is {graph}\n'
#f'leader is {leaderdict}\n')
top5 = return_top_five_scc(leaderdict)
return top5
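

# Note (added): Kosaraju's algorithm makes two linear depth-first passes,
# one over the reversed graph and one over the original, so the whole
# computation runs in O(V + E) time.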
if __name__ == '__main__':
graph = defaultdict(list)
rgraph = defaultdict(list)
with open(sys.argv[1]) as f:
for line in f:
            line_lst = re.findall(r'(\d+|\w+)', line)
graph[line_lst[0]] += [line_lst[1]]
rgraph[line_lst[1]] += [line_lst[0]]
    print(kosaraju(graph, rgraph))
| [
"[email protected]"
]
| |
9cf3d784947858edaf604e6c180fff1007ff9612 | 0b01cb61a4ae4ae236a354cbfa23064e9057e434 | /alipay/aop/api/request/AlipayExscUserFirstsignGetRequest.py | e639edfe18817ae9d75d814b5ee4329e27910ec7 | [
"Apache-2.0"
]
| permissive | hipacloud/alipay-sdk-python-all | e4aec2869bf1ea6f7c6fb97ac7cc724be44ecd13 | bdbffbc6d5c7a0a3dd9db69c99443f98aecf907d | refs/heads/master | 2022-11-14T11:12:24.441822 | 2020-07-14T03:12:15 | 2020-07-14T03:12:15 | 277,970,730 | 0 | 0 | Apache-2.0 | 2020-07-08T02:33:15 | 2020-07-08T02:33:14 | null | UTF-8 | Python | false | false | 3,664 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AlipayExscUserFirstsignGetRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._alipay_id = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def alipay_id(self):
return self._alipay_id
@alipay_id.setter
def alipay_id(self, value):
self._alipay_id = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.exsc.user.firstsign.get'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.alipay_id:
if hasattr(self.alipay_id, 'to_alipay_dict'):
params['alipay_id'] = json.dumps(obj=self.alipay_id.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['alipay_id'] = self.alipay_id
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| [
"[email protected]"
]
| |
91c8298a8f35841bf72996c47795505cf4afd03a | 65c001b5f572a6b0ca09dd9821016d628b745009 | /frappe-bench/env/lib/python2.7/site-packages/watchdog/observers/polling.py | 3039ceb3678ce611aeccc6d88d0586c3f632a5e2 | [
"MIT"
]
| permissive | ibrahmm22/library-management | 666dffebdef1333db122c2a4a99286e7c174c518 | b88a2129a5a2e96ce1f945ec8ba99a0b63b8c506 | refs/heads/master | 2022-10-30T17:53:01.238240 | 2020-06-11T18:36:41 | 2020-06-11T18:36:41 | 271,620,992 | 0 | 1 | MIT | 2022-10-23T05:04:57 | 2020-06-11T18:36:21 | CSS | UTF-8 | Python | false | false | 4,687 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011 Yesudeep Mangalapilly <[email protected]>
# Copyright 2012 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:module: watchdog.observers.polling
:synopsis: Polling emitter implementation.
:author: [email protected] (Yesudeep Mangalapilly)
Classes
-------
.. autoclass:: PollingObserver
:members:
:show-inheritance:
.. autoclass:: PollingObserverVFS
:members:
:show-inheritance:
:special-members:
"""
from __future__ import with_statement
import os
import threading
from functools import partial
from watchdog.utils import stat as default_stat
from watchdog.utils.dirsnapshot import DirectorySnapshot, DirectorySnapshotDiff
from watchdog.observers.api import (
EventEmitter,
BaseObserver,
DEFAULT_OBSERVER_TIMEOUT,
DEFAULT_EMITTER_TIMEOUT
)
from watchdog.events import (
DirMovedEvent,
DirDeletedEvent,
DirCreatedEvent,
DirModifiedEvent,
FileMovedEvent,
FileDeletedEvent,
FileCreatedEvent,
FileModifiedEvent
)
class PollingEmitter(EventEmitter):
"""
Platform-independent emitter that polls a directory to detect file
system changes.
"""
def __init__(self, event_queue, watch, timeout=DEFAULT_EMITTER_TIMEOUT,
stat=default_stat, listdir=os.listdir):
EventEmitter.__init__(self, event_queue, watch, timeout)
self._snapshot = None
self._lock = threading.Lock()
self._take_snapshot = lambda: DirectorySnapshot(
self.watch.path, self.watch.is_recursive, stat=stat, listdir=listdir)
def queue_events(self, timeout):
if not self._snapshot:
self._snapshot = self._take_snapshot()
# We don't want to hit the disk continuously.
# timeout behaves like an interval for polling emitters.
if self.stopped_event.wait(timeout):
return
with self._lock:
if not self.should_keep_running():
return
# Get event diff between fresh snapshot and previous snapshot.
# Update snapshot.
new_snapshot = self._take_snapshot()
events = DirectorySnapshotDiff(self._snapshot, new_snapshot)
self._snapshot = new_snapshot
# Files.
for src_path in events.files_deleted:
self.queue_event(FileDeletedEvent(src_path))
for src_path in events.files_modified:
self.queue_event(FileModifiedEvent(src_path))
for src_path in events.files_created:
self.queue_event(FileCreatedEvent(src_path))
for src_path, dest_path in events.files_moved:
self.queue_event(FileMovedEvent(src_path, dest_path))
# Directories.
for src_path in events.dirs_deleted:
self.queue_event(DirDeletedEvent(src_path))
for src_path in events.dirs_modified:
self.queue_event(DirModifiedEvent(src_path))
for src_path in events.dirs_created:
self.queue_event(DirCreatedEvent(src_path))
for src_path, dest_path in events.dirs_moved:
self.queue_event(DirMovedEvent(src_path, dest_path))
class PollingObserver(BaseObserver):
"""
Platform-independent observer that polls a directory to detect file
system changes.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT):
BaseObserver.__init__(self, emitter_class=PollingEmitter, timeout=timeout)
class PollingObserverVFS(BaseObserver):
"""
File system independent observer that polls a directory to detect changes.
"""
def __init__(self, stat, listdir, polling_interval=1):
"""
:param stat: stat function. See ``os.stat`` for details.
:param listdir: listdir function. See ``os.listdir`` for details.
:type polling_interval: float
:param polling_interval: interval in seconds between polling the file system.
"""
emitter_cls = partial(PollingEmitter, stat=stat, listdir=listdir)
BaseObserver.__init__(self, emitter_class=emitter_cls, timeout=polling_interval)
| [
"[email protected]"
]
| |
8180cf64858edff78040580e57e99bca34304e13 | bd649f51496a24a55a2327e658f31d6e03e2f602 | /InvTL/lm_py/py/bin/py.which | f2b30cfff3ced4c92ec7655ae03a05a1f0b7818b | [
"MIT"
]
| permissive | mickg10/DARLAB | 6507530231f749e8fc1647f3a9bec22a20bebe46 | 0cd8d094fcaf60a48a3b32f15e836fcb48d93e74 | refs/heads/master | 2020-04-15T20:39:23.403215 | 2019-01-10T06:54:50 | 2019-01-10T06:54:50 | 16,510,433 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 65 | which | #!/usr/bin/env python
from _findpy import py
py.cmdline.pywhich() | [
"[email protected]"
]
| |
64bef1b8d66e25515d68a737b143f8d15d5675ce | 7790e3a3f2de068fef343585ec856983591997a2 | /bank/migrations/0021_followlawtype.py | 67c1e5c0f8ed434aeb042dbf4b3e27f516602279 | []
| no_license | mehdi1361/tadbir | ce702a9a02672826f0bf06e8d5cf0644efe31949 | c0a67710099f713cf96930e25df708625de89a6f | refs/heads/master | 2021-06-04T07:35:37.624372 | 2018-07-23T05:25:04 | 2018-07-23T05:25:04 | 148,870,028 | 0 | 0 | null | 2019-10-22T21:40:28 | 2018-09-15T04:40:26 | HTML | UTF-8 | Python | false | false | 1,147 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-05-11 16:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bank', '0020_auto_20180510_1351'),
]
operations = [
migrations.CreateModel(
name='FollowLawType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='تاریخ ایجاد')),
('update_at', models.DateTimeField(auto_now=True, verbose_name='تاریخ بروزرسانی')),
('type', models.CharField(max_length=100, verbose_name='نوع پیگیری')),
('enable', models.BooleanField(default=False, verbose_name='فعال')),
],
options={
'verbose_name': 'پیگیری حقوقی',
'verbose_name_plural': 'پیگیری های حقوقی',
'db_table': 'follow_low_type',
},
),
]
| [
"[email protected]"
]
| |
5497eed0b98d3d44dc25ed39c7376e7800f9fcaa | 350cb6c7c7a7842e80aa06ee32bfffc5bc35ee03 | /programming/language/python/python-pillow/actions.py | 09179eb1424c0d20883d92c49aeb6480d96ba765 | []
| no_license | LimeLinux/Packages-2 | f41d11343e8b39274ccd85b9850d0f4e76830031 | 356975df129f2097f12dbed3bc2604cadb5a6c64 | refs/heads/master | 2021-04-30T23:25:31.121967 | 2017-01-21T21:46:54 | 2017-01-21T21:46:54 | 79,139,920 | 0 | 2 | null | 2017-01-21T21:46:55 | 2017-01-16T17:02:37 | Python | UTF-8 | Python | false | false | 823 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import pisitools
from pisi.actionsapi import pythonmodules
from pisi.actionsapi import shelltools
from pisi.actionsapi import get
#WorkDir="Imaging-%s" % get.srcVERSION()
def install():
pisitools.dosed("_imagingft.c", "<freetype/freetype.h>", "<freetype2/freetype.h>")
pisitools.dosed("_imagingft.c", "<freetype/fterrors.h>", "<freetype2/fterrors.h>")
pythonmodules.install()
#shelltools.cd("Sane")
#pythonmodules.install()
#shelltools.cd("..")
for header in ["Imaging.h","ImPlatform.h"]:
pisitools.insinto("/usr/include/%s" % get.curPYTHON(), "libImaging/%s" % header)
pisitools.dodoc("README.rst")
| [
"[email protected]"
]
| |
c63f6c71799ea453d1f3eec67be2aff4089d9177 | bc41457e2550489ebb3795f58b243da74a1c27ae | /fabfile.py | 36e45dab0037e8a64b682e70626dadcb3e9d14de | []
| no_license | SEL-Columbia/ss_sql_views | 28a901d95fe779b278d2a51aec84d6bf51245c02 | d146fd96849a4d165f3dc3f197aadda804a2f60a | refs/heads/master | 2021-01-01T19:35:18.999147 | 2012-05-10T18:43:36 | 2012-05-10T18:43:36 | 3,020,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 1,649 | py | '''
fabfile for offline gateway tasks
'''
import datetime as dt
from fabric.api import local, lcd, run, env
env.hosts = ['gateway.sharedsolar.org']
env.user = 'root'
def sync_db():
time = dt.datetime.now().strftime('%y%m%d')
file = 'gateway.' + time + '.sql.zip'
url = '[email protected]'
path = 'var/lib/postgresql/backups/'
    print 'Creating temporary folder ./temp'
    local('mkdir temp')
with lcd('temp'):
download_db(url, path, file)
load_db(path, file)
create_views()
local('rm -rf temp')
show_disk_space()
def download_db(url, path, file):
    # fetch the zipped dump from the gateway into the current
    # (temporary) directory
print 'Downloading database from gateway'
local('scp ' + url + ':/' + path + file + ' .')
# locally unzip database
print 'Expanding database'
local('unzip ' + file)
def load_db(path, file):
    # drop the existing database and recreate it empty
    local('dropdb gateway')
    local('createdb gateway')
# load database
print 'Loading database'
local('psql -d gateway -f ' + path + file[:-4])
def create_views():
print 'Executing create_views'
# execute all sql files
local('psql -d gateway -f views/create_view_primary_log.sql')
local('psql -d gateway -f views/create_view_midnight.sql')
local('psql -d gateway -f views/create_view_meter.sql')
local('psql -d gateway -f views/create_view_alarms.sql')
local('psql -d gateway -f views/create_view_solar.sql')
local('psql -d gateway -f views/create_view_recharge.sql')
def show_disk_space():
run('df -h') | [
"[email protected]"
]
| |
c67befeec81e23a656e21cce799a95202898726a | 51d602577affebc8d91ffe234f926469d389dc75 | /lis/specimen/lab_result_item/migrations/0001_initial.py | 69d94bf24a380776b43692bb1088907fc36bac4f | []
| no_license | botswana-harvard/lis | 5ac491373f74eaf3855f173580b000539d7f4740 | 48dc601ae05e420e8f3ebb5ea398f44f02b2e5e7 | refs/heads/master | 2020-12-29T01:31:07.821681 | 2018-06-24T06:06:57 | 2018-06-24T06:06:57 | 35,820,860 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,300 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ResultItemAudit'
db.create_table('bhp_lab_core_resultitem_audit', (
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
('hostname_created', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
('hostname_modified', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
('id', self.gf('django.db.models.fields.CharField')(max_length=36, blank=True)),
('result', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_resultitem', to=orm['lab_result.Result'])),
('test_code', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_resultitem', to=orm['lab_test_code.TestCode'])),
('result_item_value', self.gf('django.db.models.fields.CharField')(max_length=25, db_index=True)),
('result_item_quantifier', self.gf('django.db.models.fields.CharField')(default='=', max_length=25)),
('result_item_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('result_item_operator', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
('validation_status', self.gf('django.db.models.fields.CharField')(default='P', max_length=10, db_index=True)),
('validation_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('validation_username', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
('validation_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('comment', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('result_item_source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_audit_resultitem', to=orm['lab_result.ResultSource'])),
('result_item_source_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('error_code', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('_audit_id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('_audit_timestamp', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, db_index=True, blank=True)),
('_audit_change_type', self.gf('django.db.models.fields.CharField')(max_length=1)),
))
db.send_create_signal('lab_result_item', ['ResultItemAudit'])
# Adding model 'ResultItem'
db.create_table('bhp_lab_core_resultitem', (
('created', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('modified', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('user_created', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
('user_modified', self.gf('django.db.models.fields.CharField')(default='', max_length=250)),
('hostname_created', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
('hostname_modified', self.gf('django.db.models.fields.CharField')(default='home', max_length=50, blank=True)),
('id', self.gf('django.db.models.fields.CharField')(max_length=36, primary_key=True)),
('result', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lab_result.Result'])),
('test_code', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lab_test_code.TestCode'])),
('result_item_value', self.gf('django.db.models.fields.CharField')(max_length=25, db_index=True)),
('result_item_quantifier', self.gf('django.db.models.fields.CharField')(default='=', max_length=25)),
('result_item_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('result_item_operator', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
('validation_status', self.gf('django.db.models.fields.CharField')(default='P', max_length=10, db_index=True)),
('validation_datetime', self.gf('django.db.models.fields.DateTimeField')(db_index=True, null=True, blank=True)),
('validation_username', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=50, null=True, blank=True)),
('validation_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('comment', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('result_item_source', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lab_result.ResultSource'])),
('result_item_source_reference', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
('error_code', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True)),
))
db.send_create_signal('lab_result_item', ['ResultItem'])
def backwards(self, orm):
# Deleting model 'ResultItemAudit'
db.delete_table('bhp_lab_core_resultitem_audit')
# Deleting model 'ResultItem'
db.delete_table('bhp_lab_core_resultitem')
models = {
'bhp_research_protocol.fundingsource': {
'Meta': {'ordering': "['name']", 'object_name': 'FundingSource'},
'description': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'reference': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'})
},
'bhp_research_protocol.location': {
'Meta': {'ordering': "['name']", 'object_name': 'Location'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'})
},
'bhp_research_protocol.protocol': {
'Meta': {'ordering': "['protocol_identifier']", 'object_name': 'Protocol'},
'date_opened': ('django.db.models.fields.DateField', [], {}),
'date_registered': ('django.db.models.fields.DateField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '500'}),
'funding_source': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['bhp_research_protocol.FundingSource']", 'symmetrical': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'local_title': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'protocol_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True'}),
'research_title': ('django.db.models.fields.TextField', [], {'max_length': '250'}),
'short_title': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'site_name_fragment': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'bhp_research_protocol.site': {
'Meta': {'ordering': "['site_identifier']", 'object_name': 'Site'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Location']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'site_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'})
},
'lab_account.account': {
'Meta': {'ordering': "['account_name']", 'object_name': 'Account', 'db_table': "'bhp_lab_registration_account'"},
'account_closedate': ('django.db.models.fields.DateField', [], {}),
'account_holder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_account.AccountHolder']"}),
'account_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'account_opendate': ('django.db.models.fields.DateField', [], {}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_account.accountholder': {
'Meta': {'ordering': "['last_name', 'first_name']", 'unique_together': "(['last_name', 'first_name'],)", 'object_name': 'AccountHolder', 'db_table': "'bhp_lab_registration_accountholder'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_aliquot.aliquot': {
'Meta': {'object_name': 'Aliquot', 'db_table': "'bhp_lab_core_aliquot'"},
'aliquot_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 30, 13, 11, 14, 896689)'}),
'aliquot_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'aliquot_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_aliquot.AliquotType']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'condition': ('django.db.models.fields.related.ForeignKey', [], {'default': '10', 'to': "orm['lab_aliquot.AliquotCondition']"}),
'count': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'current_measure': ('django.db.models.fields.DecimalField', [], {'default': "'5.00'", 'max_digits': '10', 'decimal_places': '2'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'measure_units': ('django.db.models.fields.CharField', [], {'default': "'mL'", 'max_length': '25'}),
'medium': ('django.db.models.fields.CharField', [], {'default': "'TUBE'", 'max_length': '25'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'original_measure': ('django.db.models.fields.DecimalField', [], {'default': "'5.00'", 'max_digits': '10', 'decimal_places': '2'}),
'parent_identifier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_aliquot.Aliquot']", 'to_field': "'aliquot_identifier'", 'null': 'True', 'blank': 'True'}),
'receive': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_receive.Receive']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'available'", 'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_aliquot.aliquotcondition': {
'Meta': {'ordering': "['short_name']", 'object_name': 'AliquotCondition', 'db_table': "'bhp_lab_core_aliquotcondition'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '35'})
},
'lab_aliquot.aliquottype': {
'Meta': {'ordering': "['name']", 'object_name': 'AliquotType', 'db_table': "'bhp_lab_core_aliquottype'"},
'alpha_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_reference': ('django.db.models.fields.IntegerField', [], {}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'numeric_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '2'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_order.order': {
'Meta': {'object_name': 'Order', 'db_table': "'bhp_lab_core_order'"},
'aliquot': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_aliquot.Aliquot']"}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_reference': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'order_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'panel': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_panel.Panel']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_panel.panel': {
'Meta': {'object_name': 'Panel', 'db_table': "'bhp_lab_core_panel'"},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lab_account.Account']", 'symmetrical': 'False'}),
'aliquot_type': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lab_aliquot.AliquotType']", 'symmetrical': 'False'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_panel_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'panel_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_panel.PanelGroup']"}),
'test_code': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lab_test_code.TestCode']", 'symmetrical': 'False'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_panel.panelgroup': {
'Meta': {'object_name': 'PanelGroup', 'db_table': "'bhp_lab_core_panelgroup'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_patient.patient': {
'Meta': {'ordering': "['subject_identifier']", 'unique_together': "(['subject_identifier'],)", 'object_name': 'Patient', 'db_table': "'bhp_lab_registration_patient'"},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lab_account.Account']", 'null': 'True', 'blank': 'True'}),
'art_status': ('django.db.models.fields.CharField', [], {'default': "'UNKNOWN'", 'max_length': '10'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '250', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dob': ('django.db.models.fields.DateField', [], {}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'hiv_status': ('django.db.models.fields.CharField', [], {'default': "'UNKNOWN'", 'max_length': '10'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'is_dob_estimated': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'simple_consent': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['lab_patient.SimpleConsent']", 'null': 'True', 'blank': 'True'}),
'subject_identifier': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '25', 'db_index': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_patient.simpleconsent': {
'Meta': {'ordering': "['consent_startdate']", 'object_name': 'SimpleConsent', 'db_table': "'bhp_lab_registration_simpleconsent'"},
'consent_enddate': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'consent_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Site']"}),
'consent_startdate': ('django.db.models.fields.DateField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'may_store_samples': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'protocol': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Protocol']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_receive.receive': {
'Meta': {'object_name': 'Receive', 'db_table': "'bhp_lab_core_receive'"},
'clinician_initials': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'datetime_drawn': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'dmis_reference': ('django.db.models.fields.IntegerField', [], {}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_patient.Patient']"}),
'protocol': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Protocol']"}),
'receive_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2011, 8, 30, 13, 11, 14, 891588)', 'db_index': 'True'}),
'receive_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['bhp_research_protocol.Site']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'lab_result.result': {
'Meta': {'ordering': "['result_identifier', 'order', 'result_datetime']", 'object_name': 'Result', 'db_table': "'bhp_lab_core_result'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dmis_result_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_order.Order']"}),
'release_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'release_status': ('django.db.models.fields.CharField', [], {'default': "'NEW'", 'max_length': '25', 'db_index': 'True'}),
'release_username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'result_identifier': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_result.resultsource': {
'Meta': {'object_name': 'ResultSource', 'db_table': "'bhp_lab_core_resultsource'"},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_index': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'field_name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'short_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'version': ('django.db.models.fields.CharField', [], {'default': "'1.0'", 'max_length': '35'})
},
'lab_result_item.resultitem': {
'Meta': {'object_name': 'ResultItem', 'db_table': "'bhp_lab_core_resultitem'"},
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_result.Result']"}),
'result_item_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'result_item_operator': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_quantifier': ('django.db.models.fields.CharField', [], {'default': "'='", 'max_length': '25'}),
'result_item_source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_result.ResultSource']"}),
'result_item_source_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_value': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'test_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_test_code.TestCode']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'validation_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'validation_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '10', 'db_index': 'True'}),
'validation_username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'lab_result_item.resultitemaudit': {
'Meta': {'ordering': "['-_audit_timestamp']", 'object_name': 'ResultItemAudit', 'db_table': "'bhp_lab_core_resultitem_audit'"},
'_audit_change_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'_audit_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'_audit_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'comment': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'error_code': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '36', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'result': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_resultitem'", 'to': "orm['lab_result.Result']"}),
'result_item_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'result_item_operator': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_quantifier': ('django.db.models.fields.CharField', [], {'default': "'='", 'max_length': '25'}),
'result_item_source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_resultitem'", 'to': "orm['lab_result.ResultSource']"}),
'result_item_source_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'result_item_value': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'test_code': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_audit_resultitem'", 'to': "orm['lab_test_code.TestCode']"}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'validation_datetime': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'validation_reference': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'validation_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '10', 'db_index': 'True'}),
'validation_username': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'lab_test_code.testcode': {
'Meta': {'ordering': "['name']", 'object_name': 'TestCode', 'db_table': "'bhp_lab_test_code_testcode'"},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'display_decimal_places': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'formula': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'null': 'True', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_absolute': ('django.db.models.fields.CharField', [], {'default': "'absolute'", 'max_length': "'15'"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'test_code_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lab_test_code.TestCodeGroup']"}),
'units': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
},
'lab_test_code.testcodegroup': {
'Meta': {'ordering': "['code']", 'object_name': 'TestCodeGroup', 'db_table': "'bhp_lab_test_code_testcodegroup'"},
'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'hostname_created': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'hostname_modified': ('django.db.models.fields.CharField', [], {'default': "'home'", 'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'user_created': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'}),
'user_modified': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '250'})
}
}
complete_apps = ['lab_result_item']
| [
"[email protected]"
]
| |
b620e42042438f0ddf82969a5e2f05dcf02a8e23 | 3922c05b9434bb5a96f7833a987c50c8e3e29107 | /news/admin.py | 6881fe98a61e98e3099d1a8b53bfb646d84da9fa | [
"MIT"
]
| permissive | jasonmuchiri/moringa-tribune | e7769dca9aa2e7a9cdc62be56c3071104ba30f33 | ceabe0cf9cc136b6eb5072253aef09f43bea7040 | refs/heads/master | 2020-05-23T23:19:55.111831 | 2019-05-18T21:32:44 | 2019-05-18T21:32:44 | 186,990,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py | from django.contrib import admin
from .models import Article, tags
# Register your models here.
class ArticleAdmin(admin.ModelAdmin):
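    # filter_horizontal renders the many-to-many 'tags' field as Django's
    # two-pane selector widget instead of the default multi-select box.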
filter_horizontal = ('tags',)
admin.site.register(Article, ArticleAdmin)
admin.site.register(tags)
| [
"[email protected]"
]
| |
6bd5fb8e2cc28159a3d0726aa5efc0e21295b713 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/contour/_textsrc.py | 43bd0d62ed17e92c16a553b953658aaf6d67f0be | [
"MIT"
]
| permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 393 | py | import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="textsrc", parent_name="contour", **kwargs):
super(TextsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs
)
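# Usage sketch (hypothetical; plotly normally instantiates these validators
# internally from its generated figure classes, so user code rarely touches
# them directly). `validate_coerce` is inherited from the base validator:
#
#     validator = TextsrcValidator()
#     validator.validate_coerce("my_column_id")  # a grid column-source string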
| [
"[email protected]"
]
| |
6ad6148e7378b35ec5dbeeb2a493dbad852d7119 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/bob/61f29ff81b664630acba807a6f4f14e9.py | b34cc9defb4248705933ec5d41f5347dc783be44 | []
| no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 286 | py | #
# Skeleton file for the Python "Bob" exercise.
#
def hey(what):
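    # Bob replies "Whoa, chill out!" to shouting (all caps containing at
    # least one letter) and "Sure." to questions; the len(what) < 7 check
    # below is this solution's rough stand-in for detecting silence.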
if what.upper() == what and any(c.isalpha() for c in what):
return "Whoa, chill out!"
if what != '' and what[-1] == '?':
return "Sure."
if len(what) < 7:
return "Fine. Be that way!"
else:
return "Whatever."
| [
"[email protected]"
]
| |
557c96972141d1a75b7f45e4289a642a6390440e | 08dfaf714830a6310742dcd50848790d595e838e | /位运算/code_01_EvenTimesOddTimes.py | c16881e90ab21aa241caa096e317d2dd06fa949c | []
| no_license | Tokyo113/leetcode_python | d9e0fb96a76efaadcec7aad08f5ef542d898d434 | e86b3fb26aef1cf63727e3e5c9fd4ddc9bedb7f1 | refs/heads/master | 2020-08-10T15:36:10.364714 | 2020-04-13T08:28:53 | 2020-04-13T08:28:53 | 214,369,187 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | #coding:utf-8
'''
@Time: 2019/12/4 21:54
@author: Tokyo
@file: code_01_EvenTimesOddTimes.py
@desc:
1. An array contains one kind of number that appears an odd number of times;
   every other number appears an even number of times. Find that number.
2. An array contains two kinds of numbers that appear an odd number of times;
   every other number appears an even number of times. Find those two numbers.
'''
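# Both solutions rely on basic XOR facts: x ^ x == 0, x ^ 0 == x, and XOR is
# commutative and associative. XOR-ing every element therefore cancels each
# value that appears an even number of times, leaving only the odd-count
# value(s).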
def findOddTimes1(arr):
eor = 0
for i in arr:
eor = eor ^ i
return eor
def findOddTimes2(arr):
eor = 0
for i in arr:
eor = eor ^ i
    # Now eor == a ^ b, where a and b are the two odd-count values.
    # a != b, so eor is non-zero and has at least one set bit; isolate its
    # rightmost set bit. a and b must differ at that bit: one has a 1 there,
    # the other a 0.
rightone = eor & (~eor+1)
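    # Example: eor = 0b0110 -> ~eor + 1 (== -eor in two's complement) is
    # ...11111010, so eor & (~eor + 1) == 0b0010, the lowest set bit of eor.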
eor1 = 0
for i in arr:
if (i&rightone) == 0:
eor1 = eor1 ^ i
return eor1, eor1^eor
if __name__ == '__main__':
a = [1,2,3,2,1,2,4,4,3,2,5]
print(findOddTimes1(a))
b = [4, 3, 4, 2, 2, 1, 4, 1, 1, 1, 3, 3, 1, 1, 1, 4, 2, 2]
print(findOddTimes2(b))
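    # expected output: 5, then (1, 3), the values with odd occurrence counts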
| [
"[email protected]"
]
| |
39c078ee69d1098e1c91f37879882232c475e2f0 | 59b0ebc4249f20edd0e87dc63784c6e8c138c7fd | /.history/fibonacci_20180603232558.py | 0f355ae930f9f8d834a1e6a158738d3573e77163 | []
| no_license | Los4U/first_python_programs | f397da10be3ef525995f3f220e3b60012a6accaa | c3fc33a38c84abd292cb2e86de63e09434fc7fc4 | refs/heads/master | 2020-03-22T08:09:40.426118 | 2018-07-04T17:17:58 | 2018-07-04T17:17:58 | 139,748,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | py | i = 0
# j and k carry the previous two terms; seeding j = 1, k = 0 makes the
# printed sequence start 0, 1, 1, 2, 3, 5, ...
j = 1
k = 0
fib = 0
user_input = int(input("How many numbers to print out? : "))
for fn in range(user_input):
    print('{0:2d} {1:>10}'.format(fn, fib))  # index, then the term right-aligned
    fib = j + k
    j = k
    k = fib
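# Sample run with user_input = 5 prints index/value pairs:
#  0          0
#  1          1
#  2          1
#  3          2
#  4          3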
| [
"[email protected]"
]
| |
85c2a8dc30a8c4d16a1497f4bad44935f7ca19d2 | 81485dc96f7539730bee976c7e8e3d5929c3df77 | /ProjectEuler145.py | a3fd109802113ff296456c4d9fc9e471357fb859 | []
| no_license | zfhrp6/PE | 6462621f5cb3812c7d8d9f591ad66382490661e2 | 4e64f6549bd50fb4c1ee5f580a76764935e35360 | refs/heads/master | 2020-04-05T13:45:56.936758 | 2015-12-11T08:29:50 | 2015-12-11T08:29:50 | 4,495,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 940 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
project euler problem 145
Some positive integers n have the property that the sum [n + reverse(n)]
consists entirely of odd (decimal) digits. For instance, 36 + 63 = 99 and
409 + 904 = 1313. We will call such numbers reversible, so 36, 63, 409,
and 904 are reversible. Leading zeroes are not allowed in either n or
reverse(n). There are 120 reversible numbers below one thousand.
How many reversible numbers are there below one billion (10^9)?
"""
import time
t0 = time.time()
answer = 0
i = 0
while i < 10 ** 9:
i += 1
    if i % 10 == 0:  # reverse(i) would have a leading zero
        continue
    if i % 1000000 == 1:  # crude progress indicator
        print(i)
    num = i + int(str(i)[::-1])
    if any(d in "02468" for d in str(num)):  # any even digit disqualifies i
        continue
    answer += 1
print(answer)
print(time.time() - t0, "seconds")
| [
"[email protected]"
]
|