blob_id (string, len 40-40) | directory_id (string, len 40-40) | path (string, len 3-616) | content_id (string, len 40-40) | detected_licenses (sequence, len 0-112) | license_type (string, 2 classes) | repo_name (string, len 5-115) | snapshot_id (string, len 40-40) | revision_id (string, len 40-40) | branch_name (string, 777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 to 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 to 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 to 2023-09-06 01:08:06) | github_id (int64, 4.92k to 681M, nullable) | star_events_count (int64, 0 to 209k) | fork_events_count (int64, 0 to 110k) | gha_license_id (string, 22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable) | gha_language (string, 149 classes) | src_encoding (string, 26 classes) | language (string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 3 to 10.2M) | extension (string, 188 classes) | content (string, len 3 to 10.2M) | authors (sequence, len 1-1) | author_id (string, len 1-132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0e8eb7cfd16b0572c20b19d0ca5619c7eecbd659 | 7c9917d62959b8d69309d44362481356e632083e | /enzymatic_bias/profile_model_of_bias/kmer_init_tuned/dnase/24mer/model.py | 8e07a5bc7b4e1400fd6be929ca7b482df4c88a82 | [
"MIT"
] | permissive | kundajelab/bias_correction | bee77bd2d36268aa6b7046b817e9e349c8cc8238 | 521678ea8739473f793b0ce85e22e622d13df6fe | refs/heads/master | 2021-06-21T11:34:54.558788 | 2021-06-10T06:39:35 | 2021-06-10T06:39:35 | 218,137,376 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,974 | py | import pickle
import pdb
import numpy as np
from keras.backend import int_shape
from sklearn.metrics import average_precision_score
from kerasAC.metrics import *
from kerasAC.custom_losses import *
import keras
# import the various keras layers
from keras.layers import Dense, Activation, Dropout, Flatten, Reshape, Input, Concatenate, Cropping1D, Add
from keras.layers.convolutional import Conv1D
from keras.layers.pooling import GlobalMaxPooling1D, MaxPooling1D, GlobalAveragePooling1D
from keras.layers.normalization import BatchNormalization
from keras.optimizers import Adam
from keras.constraints import maxnorm
from keras.regularizers import l1, l2
from keras.models import Model
def get_model_param_dict(param_file):
'''
    Parse a tab-separated param_file with two columns: the parameter name in column 1 and its value in column 2. Returns an empty dict if param_file is None.
'''
params={}
if param_file is None:
return params
for line in open(param_file,'r').read().strip().split('\n'):
tokens=line.split('\t')
params[tokens[0]]=tokens[1]
return params
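# For reference, a minimal sketch of a model_params file consumed above
# (tab-separated, one name<TAB>value pair per line; the values here are
# illustrative, not tuned settings):
#
#     filters<TAB>4
#     conv1_kernel_size<TAB>24
#     counts_loss_weight<TAB>0.5
#
# get_model_param_dict on such a file returns
# {'filters': '4', 'conv1_kernel_size': '24', 'counts_loss_weight': '0.5'};
# note the values stay strings until the caller casts them.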
def getModelGivenModelOptionsAndWeightInits(args):
#default params (can be overwritten by providing model_params file as input to the training function)
filters=1
conv1_kernel_size=6
control_smoothing=[1, 50]
counts_loss_weight=1
profile_loss_weight=1
model_params=get_model_param_dict(args.model_params)
if 'filters' in model_params:
filters=int(model_params['filters'])
if 'conv1_kernel_size' in model_params:
conv1_kernel_size=int(model_params['conv1_kernel_size'])
if 'counts_loss_weight' in model_params:
counts_loss_weight=float(model_params['counts_loss_weight'])
if 'profile_loss_weight' in model_params:
profile_loss_weight=float(model_params['profile_loss_weight'])
print("params:")
print("filters:"+str(filters))
print("conv1_kernel_size:"+str(conv1_kernel_size))
print("counts_loss_weight:"+str(counts_loss_weight))
print("profile_loss_weight:"+str(profile_loss_weight))
#load the fixed weights
tobias_data_dnase_k562=pickle.load(open("/srv/scratch/annashch/bias_correction/enzymatic_bias/tobias/dnase/K562.filtered_AtacBias.pickle",'rb'))
tobias_dnase_pssm_forward=np.transpose(tobias_data_dnase_k562.bias['forward'].pssm[0:4])[:,[0,2,3,1]]
conv1_pwm=np.expand_dims(tobias_dnase_pssm_forward,axis=-1)
conv1_bias=np.zeros((1,))
conv1_frozen_weights=[conv1_pwm, conv1_bias]
#read in arguments
seed=args.seed
init_weights=args.init_weights
sequence_flank=args.tdb_input_flank[0]
num_tasks=args.num_tasks
seq_len=2*sequence_flank
out_flank=args.tdb_output_flank[0]
out_pred_len=2*out_flank
print(seq_len)
print(out_pred_len)
#define inputs
inp = Input(shape=(seq_len, 4),name='sequence')
# first convolution without dilation
first_conv = Conv1D(filters,
weights=conv1_frozen_weights,
kernel_size=conv1_kernel_size,
padding='valid',
activation='relu',
name='1st_conv')(inp)
profile_out_prebias_shape =int_shape(first_conv)
cropsize = int(profile_out_prebias_shape[1]/2)-int(out_pred_len/2)
if profile_out_prebias_shape[1]%2==0:
crop_left=cropsize
crop_right=cropsize
else:
crop_left=cropsize
crop_right=cropsize+1
print(crop_left)
print(crop_right)
profile_out_prebias = Cropping1D((crop_left,crop_right),
name='prof_out_crop2match_output')(first_conv)
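    # Worked example with the __main__ defaults below (tdb_input_flank=673,
    # tdb_output_flank=500): seq_len = 1346, the 'valid' convolution with
    # kernel_size 6 yields length 1341, cropsize = 670 - 500 = 170, and since
    # 1341 is odd the crop is asymmetric: crop_left = 170, crop_right = 171,
    # leaving exactly out_pred_len = 1000 positions.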
profile_out = Conv1D(filters=num_tasks,
kernel_size=1,
name="profile_predictions")(profile_out_prebias)
gap_combined_conv = GlobalAveragePooling1D(name='gap')(first_conv)
count_out = Dense(num_tasks, name="logcount_predictions")(gap_combined_conv)
model=Model(inputs=[inp],outputs=[profile_out,
count_out])
print("got model")
model.compile(optimizer=Adam(),
loss=[MultichannelMultinomialNLL(1),'mse'],
loss_weights=[profile_loss_weight,counts_loss_weight])
print("compiled model")
return model
if __name__=="__main__":
import argparse
parser=argparse.ArgumentParser(description="view model arch")
parser.add_argument("--seed",type=int,default=1234)
parser.add_argument("--init_weights",default=None)
parser.add_argument("--tdb_input_flank",nargs="+",default=[673])
parser.add_argument("--tdb_output_flank",nargs="+",default=[500])
parser.add_argument("--num_tasks",type=int,default=1)
parser.add_argument("--model_params",default=None)
args=parser.parse_args()
model=getModelGivenModelOptionsAndWeightInits(args)
print(model.summary())
pdb.set_trace()
| [
"[email protected]"
] | |
c51867d2cd55c0e97a84af6332cfcfd529eeb1d2 | 40be7c7a50b839a922c22ea624123b11e5da25cb | /feria/migrations/0003_franquicia_imagen.py | c252e9fbb8cc5b7835b3b8c49f36ae453c9fd74f | [] | no_license | LuberNavarrete/sistema | e70b15d0410402ceb6f3ba2a320886d5b225c65c | e4800f3aa2cdde69189a43dcf9543be85ed14693 | refs/heads/master | 2021-01-10T16:54:21.479466 | 2015-11-15T14:21:20 | 2015-11-15T14:21:20 | 44,011,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('feria', '0002_auto_20151008_1937'),
]
operations = [
migrations.AddField(
model_name='franquicia',
name='imagen',
field=models.ImageField(null=True, upload_to=b'imagenes'),
),
]
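# This migration adds a nullable 'imagen' ImageField to the existing
# 'franquicia' model; uploaded files are stored under the 'imagenes'
# directory inside MEDIA_ROOT.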
| [
"[email protected]"
] | |
951e87820000a2ba516ce5733b3db4e751382387 | be0eda70579e191d7dd1ace87ccbda8a3e85474e | /app01/urls.py | 78bd401466e3cf6df768de0ca595ee3d970007da | [] | no_license | zxycode-2020/django_tutrital2 | 084ebe4a83e7a9724163ae54b816239ff2b0cce6 | 969ce0b3caca92c045afee0e5eb628f9afb35b48 | refs/heads/master | 2022-05-03T23:22:17.075830 | 2020-02-05T03:11:09 | 2020-02-05T03:11:09 | 236,920,126 | 0 | 0 | null | 2022-04-22T23:00:29 | 2020-01-29T06:40:39 | Python | UTF-8 | Python | false | false | 577 | py | from django.urls import path, include
from app01.views import index, article, test_url, student, \
students, args, reg, xuanran, orm_test, post_cls, get_cls
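# With the patterns below, a request whose path matches article/42/ is
# routed to the article view with aid='42' (the str converter always
# passes the captured value as a string).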
urlpatterns = [
path('index/', index),
path('article/<str:aid>/', article),
path('test_url/', test_url),
    path('students/', students),  # student list
    path('student/<str:stu_id>/', student),  # single student
path('args/', args),
path('reg/', reg),
path('xuanran/', xuanran),
path('orm_test/', orm_test),
path('post_cls/', post_cls),
path('get_cls/', get_cls),
]
| [
"[email protected]"
] | |
2b792a76a6d249e279abf8afff4ad007a551e9e7 | d42a9128898d504a9831f1afee3198c4677236c9 | /Level_3/가장먼노드.py | 19833971ef9bcd673871a0c8a749d8662f7847c8 | [] | no_license | ketkat001/Programmers-coding | 6848a9c8cffd97b792cfc8856ec135b72af5d688 | 799baba8d66a9971b43233d231cecbf262b4ea27 | refs/heads/master | 2023-09-02T23:07:25.614820 | 2021-10-17T18:12:02 | 2021-10-17T18:12:02 | 235,016,879 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 650 | py | from collections import deque
def solution(n, edge):
answer = 0
graph = [[] for _ in range(n+1)]
dp = [0] * (n+1)
dp[1] = 1
queue = deque([1])
for edg in edge:
graph[edg[0]].append(edg[1])
graph[edg[1]].append(edg[0])
while queue:
answer = len(queue)
for i in range(answer):
next_node = queue.popleft()
for target_node in graph[next_node]:
if dp[target_node] == 0:
dp[target_node] = 1
queue.append(target_node)
return answer
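# For the sample call below: nodes 4, 5 and 6 all lie two hops from node 1,
# which is the deepest BFS level, so the expected output is 3.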
print(solution(6, [[3, 6], [4, 3], [3, 2], [1, 3], [1, 2], [2, 4], [5, 2]])) | [
"[email protected]"
] | |
a24038b8a160f6fdb13f63be8d66b6cae29dd3e1 | 42c48f3178a48b4a2a0aded547770027bf976350 | /google/ads/google_ads/v5/proto/services/account_link_service_pb2.py | 972899082ca981068d71b76032f3fa541ac8301d | [
"Apache-2.0"
] | permissive | fiboknacky/google-ads-python | e989464a85f28baca1f28d133994c73759e8b4d6 | a5b6cede64f4d9912ae6ad26927a54e40448c9fe | refs/heads/master | 2021-08-07T20:18:48.618563 | 2020-12-11T09:21:29 | 2020-12-11T09:21:29 | 229,712,514 | 0 | 0 | Apache-2.0 | 2019-12-23T08:44:49 | 2019-12-23T08:44:49 | null | UTF-8 | Python | false | true | 21,860 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v5/proto/services/account_link_service.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v5.proto.resources import account_link_pb2 as google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_account__link__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v5/proto/services/account_link_service.proto',
package='google.ads.googleads.v5.services',
syntax='proto3',
serialized_options=b'\n$com.google.ads.googleads.v5.servicesB\027AccountLinkServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v5/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V5.Services\312\002 Google\\Ads\\GoogleAds\\V5\\Services\352\002$Google::Ads::GoogleAds::V5::Services',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\nAgoogle/ads/googleads_v5/proto/services/account_link_service.proto\x12 google.ads.googleads.v5.services\x1a:google/ads/googleads_v5/proto/resources/account_link.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\"\\\n\x15GetAccountLinkRequest\x12\x43\n\rresource_name\x18\x01 \x01(\tB,\xe0\x41\x02\xfa\x41&\n$googleads.googleapis.com/AccountLink\"\x7f\n\x18\x43reateAccountLinkRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12I\n\x0c\x61\x63\x63ount_link\x18\x02 \x01(\x0b\x32..google.ads.googleads.v5.resources.AccountLinkB\x03\xe0\x41\x02\"2\n\x19\x43reateAccountLinkResponse\x12\x15\n\rresource_name\x18\x01 \x01(\t\"\xb4\x01\n\x18MutateAccountLinkRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12N\n\toperation\x18\x02 \x01(\x0b\x32\x36.google.ads.googleads.v5.services.AccountLinkOperationB\x03\xe0\x41\x02\x12\x17\n\x0fpartial_failure\x18\x03 \x01(\x08\x12\x15\n\rvalidate_only\x18\x04 \x01(\x08\"5\n\x14\x41\x63\x63ountLinkOperation\x12\x10\n\x06remove\x18\x03 \x01(\tH\x00\x42\x0b\n\toperation\"f\n\x19MutateAccountLinkResponse\x12I\n\x06result\x18\x01 \x01(\x0b\x32\x39.google.ads.googleads.v5.services.MutateAccountLinkResult\"0\n\x17MutateAccountLinkResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xc2\x05\n\x12\x41\x63\x63ountLinkService\x12\xc1\x01\n\x0eGetAccountLink\x12\x37.google.ads.googleads.v5.services.GetAccountLinkRequest\x1a..google.ads.googleads.v5.resources.AccountLink\"F\x82\xd3\xe4\x93\x02\x30\x12./v5/{resource_name=customers/*/accountLinks/*}\xda\x41\rresource_name\x12\xe5\x01\n\x11\x43reateAccountLink\x12:.google.ads.googleads.v5.services.CreateAccountLinkRequest\x1a;.google.ads.googleads.v5.services.CreateAccountLinkResponse\"W\x82\xd3\xe4\x93\x02\x36\"1/v5/customers/{customer_id=*}/accountLinks:create:\x01*\xda\x41\x18\x63ustomer_id,account_link\x12\xe2\x01\n\x11MutateAccountLink\x12:.google.ads.googleads.v5.services.MutateAccountLinkRequest\x1a;.google.ads.googleads.v5.services.MutateAccountLinkResponse\"T\x82\xd3\xe4\x93\x02\x36\"1/v5/customers/{customer_id=*}/accountLinks:mutate:\x01*\xda\x41\x15\x63ustomer_id,operation\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xfe\x01\n$com.google.ads.googleads.v5.servicesB\x17\x41\x63\x63ountLinkServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v5/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V5.Services\xca\x02 Google\\Ads\\GoogleAds\\V5\\Services\xea\x02$Google::Ads::GoogleAds::V5::Servicesb\x06proto3'
,
dependencies=[google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_account__link__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,])
_GETACCOUNTLINKREQUEST = _descriptor.Descriptor(
name='GetAccountLinkRequest',
full_name='google.ads.googleads.v5.services.GetAccountLinkRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v5.services.GetAccountLinkRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A&\n$googleads.googleapis.com/AccountLink', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=278,
serialized_end=370,
)
_CREATEACCOUNTLINKREQUEST = _descriptor.Descriptor(
name='CreateAccountLinkRequest',
full_name='google.ads.googleads.v5.services.CreateAccountLinkRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v5.services.CreateAccountLinkRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='account_link', full_name='google.ads.googleads.v5.services.CreateAccountLinkRequest.account_link', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=372,
serialized_end=499,
)
_CREATEACCOUNTLINKRESPONSE = _descriptor.Descriptor(
name='CreateAccountLinkResponse',
full_name='google.ads.googleads.v5.services.CreateAccountLinkResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v5.services.CreateAccountLinkResponse.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=501,
serialized_end=551,
)
_MUTATEACCOUNTLINKREQUEST = _descriptor.Descriptor(
name='MutateAccountLinkRequest',
full_name='google.ads.googleads.v5.services.MutateAccountLinkRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v5.services.MutateAccountLinkRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='operation', full_name='google.ads.googleads.v5.services.MutateAccountLinkRequest.operation', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='partial_failure', full_name='google.ads.googleads.v5.services.MutateAccountLinkRequest.partial_failure', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='validate_only', full_name='google.ads.googleads.v5.services.MutateAccountLinkRequest.validate_only', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=554,
serialized_end=734,
)
_ACCOUNTLINKOPERATION = _descriptor.Descriptor(
name='AccountLinkOperation',
full_name='google.ads.googleads.v5.services.AccountLinkOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='remove', full_name='google.ads.googleads.v5.services.AccountLinkOperation.remove', index=0,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v5.services.AccountLinkOperation.operation',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=736,
serialized_end=789,
)
_MUTATEACCOUNTLINKRESPONSE = _descriptor.Descriptor(
name='MutateAccountLinkResponse',
full_name='google.ads.googleads.v5.services.MutateAccountLinkResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='google.ads.googleads.v5.services.MutateAccountLinkResponse.result', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=791,
serialized_end=893,
)
_MUTATEACCOUNTLINKRESULT = _descriptor.Descriptor(
name='MutateAccountLinkResult',
full_name='google.ads.googleads.v5.services.MutateAccountLinkResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v5.services.MutateAccountLinkResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=895,
serialized_end=943,
)
_CREATEACCOUNTLINKREQUEST.fields_by_name['account_link'].message_type = google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_account__link__pb2._ACCOUNTLINK
_MUTATEACCOUNTLINKREQUEST.fields_by_name['operation'].message_type = _ACCOUNTLINKOPERATION
_ACCOUNTLINKOPERATION.oneofs_by_name['operation'].fields.append(
_ACCOUNTLINKOPERATION.fields_by_name['remove'])
_ACCOUNTLINKOPERATION.fields_by_name['remove'].containing_oneof = _ACCOUNTLINKOPERATION.oneofs_by_name['operation']
_MUTATEACCOUNTLINKRESPONSE.fields_by_name['result'].message_type = _MUTATEACCOUNTLINKRESULT
DESCRIPTOR.message_types_by_name['GetAccountLinkRequest'] = _GETACCOUNTLINKREQUEST
DESCRIPTOR.message_types_by_name['CreateAccountLinkRequest'] = _CREATEACCOUNTLINKREQUEST
DESCRIPTOR.message_types_by_name['CreateAccountLinkResponse'] = _CREATEACCOUNTLINKRESPONSE
DESCRIPTOR.message_types_by_name['MutateAccountLinkRequest'] = _MUTATEACCOUNTLINKREQUEST
DESCRIPTOR.message_types_by_name['AccountLinkOperation'] = _ACCOUNTLINKOPERATION
DESCRIPTOR.message_types_by_name['MutateAccountLinkResponse'] = _MUTATEACCOUNTLINKRESPONSE
DESCRIPTOR.message_types_by_name['MutateAccountLinkResult'] = _MUTATEACCOUNTLINKRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetAccountLinkRequest = _reflection.GeneratedProtocolMessageType('GetAccountLinkRequest', (_message.Message,), {
'DESCRIPTOR' : _GETACCOUNTLINKREQUEST,
'__module__' : 'google.ads.googleads_v5.proto.services.account_link_service_pb2'
,
'__doc__': """Request message for [AccountLinkService.GetAccountLink][google.ads.goo
gleads.v5.services.AccountLinkService.GetAccountLink].
Attributes:
resource_name:
Required. Resource name of the account link.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.GetAccountLinkRequest)
})
_sym_db.RegisterMessage(GetAccountLinkRequest)
CreateAccountLinkRequest = _reflection.GeneratedProtocolMessageType('CreateAccountLinkRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEACCOUNTLINKREQUEST,
'__module__' : 'google.ads.googleads_v5.proto.services.account_link_service_pb2'
,
'__doc__': """Request message for [AccountLinkService.CreateAccountLink][google.ads.
googleads.v5.services.AccountLinkService.CreateAccountLink].
Attributes:
customer_id:
Required. The ID of the customer for which the account link is
created.
account_link:
Required. The account link to be created.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.CreateAccountLinkRequest)
})
_sym_db.RegisterMessage(CreateAccountLinkRequest)
CreateAccountLinkResponse = _reflection.GeneratedProtocolMessageType('CreateAccountLinkResponse', (_message.Message,), {
'DESCRIPTOR' : _CREATEACCOUNTLINKRESPONSE,
'__module__' : 'google.ads.googleads_v5.proto.services.account_link_service_pb2'
,
'__doc__': """Response message for [AccountLinkService.CreateAccountLink][google.ads
.googleads.v5.services.AccountLinkService.CreateAccountLink].
Attributes:
resource_name:
Returned for successful operations. Resource name of the
account link.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.CreateAccountLinkResponse)
})
_sym_db.RegisterMessage(CreateAccountLinkResponse)
MutateAccountLinkRequest = _reflection.GeneratedProtocolMessageType('MutateAccountLinkRequest', (_message.Message,), {
'DESCRIPTOR' : _MUTATEACCOUNTLINKREQUEST,
'__module__' : 'google.ads.googleads_v5.proto.services.account_link_service_pb2'
,
'__doc__': """Request message for [AccountLinkService.MutateAccountLink][google.ads.
googleads.v5.services.AccountLinkService.MutateAccountLink].
Attributes:
customer_id:
Required. The ID of the customer being modified.
operation:
Required. The operation to perform on the link.
partial_failure:
If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will
be carried out in one transaction if and only if they are all
valid. Default is false.
validate_only:
If true, the request is validated but not executed. Only
errors are returned, not results.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.MutateAccountLinkRequest)
})
_sym_db.RegisterMessage(MutateAccountLinkRequest)
AccountLinkOperation = _reflection.GeneratedProtocolMessageType('AccountLinkOperation', (_message.Message,), {
'DESCRIPTOR' : _ACCOUNTLINKOPERATION,
'__module__' : 'google.ads.googleads_v5.proto.services.account_link_service_pb2'
,
'__doc__': """A single update on an account link.
Attributes:
operation:
The operation to perform.
remove:
Remove operation: A resource name for the account link to
remove is expected, in this format:
``customers/{customer_id}/accountLinks/{account_link_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.AccountLinkOperation)
})
_sym_db.RegisterMessage(AccountLinkOperation)
MutateAccountLinkResponse = _reflection.GeneratedProtocolMessageType('MutateAccountLinkResponse', (_message.Message,), {
'DESCRIPTOR' : _MUTATEACCOUNTLINKRESPONSE,
'__module__' : 'google.ads.googleads_v5.proto.services.account_link_service_pb2'
,
'__doc__': """Response message for account link mutate.
Attributes:
result:
Result for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.MutateAccountLinkResponse)
})
_sym_db.RegisterMessage(MutateAccountLinkResponse)
MutateAccountLinkResult = _reflection.GeneratedProtocolMessageType('MutateAccountLinkResult', (_message.Message,), {
'DESCRIPTOR' : _MUTATEACCOUNTLINKRESULT,
'__module__' : 'google.ads.googleads_v5.proto.services.account_link_service_pb2'
,
'__doc__': """The result for the account link mutate.
Attributes:
resource_name:
Returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v5.services.MutateAccountLinkResult)
})
_sym_db.RegisterMessage(MutateAccountLinkResult)
DESCRIPTOR._options = None
_GETACCOUNTLINKREQUEST.fields_by_name['resource_name']._options = None
_CREATEACCOUNTLINKREQUEST.fields_by_name['customer_id']._options = None
_CREATEACCOUNTLINKREQUEST.fields_by_name['account_link']._options = None
_MUTATEACCOUNTLINKREQUEST.fields_by_name['customer_id']._options = None
_MUTATEACCOUNTLINKREQUEST.fields_by_name['operation']._options = None
_ACCOUNTLINKSERVICE = _descriptor.ServiceDescriptor(
name='AccountLinkService',
full_name='google.ads.googleads.v5.services.AccountLinkService',
file=DESCRIPTOR,
index=0,
serialized_options=b'\312A\030googleads.googleapis.com',
create_key=_descriptor._internal_create_key,
serialized_start=946,
serialized_end=1652,
methods=[
_descriptor.MethodDescriptor(
name='GetAccountLink',
full_name='google.ads.googleads.v5.services.AccountLinkService.GetAccountLink',
index=0,
containing_service=None,
input_type=_GETACCOUNTLINKREQUEST,
output_type=google_dot_ads_dot_googleads__v5_dot_proto_dot_resources_dot_account__link__pb2._ACCOUNTLINK,
serialized_options=b'\202\323\344\223\0020\022./v5/{resource_name=customers/*/accountLinks/*}\332A\rresource_name',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateAccountLink',
full_name='google.ads.googleads.v5.services.AccountLinkService.CreateAccountLink',
index=1,
containing_service=None,
input_type=_CREATEACCOUNTLINKREQUEST,
output_type=_CREATEACCOUNTLINKRESPONSE,
serialized_options=b'\202\323\344\223\0026\"1/v5/customers/{customer_id=*}/accountLinks:create:\001*\332A\030customer_id,account_link',
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='MutateAccountLink',
full_name='google.ads.googleads.v5.services.AccountLinkService.MutateAccountLink',
index=2,
containing_service=None,
input_type=_MUTATEACCOUNTLINKREQUEST,
output_type=_MUTATEACCOUNTLINKRESPONSE,
serialized_options=b'\202\323\344\223\0026\"1/v5/customers/{customer_id=*}/accountLinks:mutate:\001*\332A\025customer_id,operation',
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_ACCOUNTLINKSERVICE)
DESCRIPTOR.services_by_name['AccountLinkService'] = _ACCOUNTLINKSERVICE
# @@protoc_insertion_point(module_scope)
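# A minimal construction sketch using the generated request messages (the
# customer and account-link IDs below are placeholders, not real resources):
#
#   get_req = GetAccountLinkRequest(
#       resource_name='customers/1234567890/accountLinks/987654321')
#   mutate_req = MutateAccountLinkRequest(
#       customer_id='1234567890',
#       operation=AccountLinkOperation(
#           remove='customers/1234567890/accountLinks/987654321'))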
| [
"[email protected]"
] | |
ecad99a07312379715f64a3e39b3ea5577d254ee | c5746efe18a5406764c041d149d89c0e0564c5a5 | /1. Python语言核心编程/1. Python核心/Day07/exercise11.py | a656871fe2f7a823083003ada2bd5ae8242e8c9c | [] | no_license | ShaoxiongYuan/PycharmProjects | fc7d9eeaf833d3711211cd2fafb81dd277d4e4a3 | 5111d4c0a7644c246f96e2d038c1a10b0648e4bf | refs/heads/master | 2021-12-15T05:45:42.117000 | 2021-11-23T06:45:16 | 2021-11-23T06:45:16 | 241,294,858 | 3 | 1 | null | 2021-02-20T15:29:07 | 2020-02-18T07:06:08 | Jupyter Notebook | UTF-8 | Python | false | false | 175 | py | def sum_digit(num):
"""
    :param num: a non-negative integer
    :return: the sum of the decimal digits of num
"""
count = 0
for item in str(num):
count += int(item)
return count
print(sum_digit(1234))
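# Expected output: 10 (1 + 2 + 3 + 4).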
| [
"[email protected]"
] | |
4052c872dbac2fd274177618ea0b913cd7c86450 | 6a9f06b967d7641ddff7b56425651b29d3e577f4 | /mindinsight/mindinsight/backend/datavisual/train_visual_api.py | a868a443c817c402a689b20195737d12c7706bd9 | [
"Apache-2.0"
] | permissive | ZeroWangZY/DL-VIS | b3117016547007b88dc66cfe7339ef02b0d84e9c | 8be1c70c44913a6f67dd424aa0e0330f82e48b06 | refs/heads/master | 2023-08-18T00:22:30.906432 | 2020-12-04T03:35:50 | 2020-12-04T03:35:50 | 232,723,696 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,850 | py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Backend interface module.
This module provides the interfaces to the training-visualization processor functions.
"""
from flask import Blueprint
from flask import request
from flask import jsonify
from mindinsight.conf import settings
from mindinsight.datavisual.utils.tools import get_train_id
from mindinsight.datavisual.utils.tools import if_nan_inf_to_none
from mindinsight.datavisual.processors.histogram_processor import HistogramProcessor
from mindinsight.datavisual.processors.images_processor import ImageProcessor
from mindinsight.datavisual.processors.scalars_processor import ScalarsProcessor
from mindinsight.datavisual.processors.graph_processor import GraphProcessor
from mindinsight.datavisual.data_transform.data_manager import DATA_MANAGER
BLUEPRINT = Blueprint("train_visual", __name__, url_prefix=settings.URL_PATH_PREFIX+settings.API_PREFIX)
@BLUEPRINT.route("/datavisual/image/metadata", methods=["GET"])
def image_metadata():
"""
Interface to fetch metadata about the images for the particular run,tag, and zero-indexed sample.
Returns:
Response, which contains a list in JSON containing image events, each
one of which is an object containing items wall_time, step, width,
height, and query.
"""
tag = request.args.get("tag")
train_id = get_train_id(request)
processor = ImageProcessor(DATA_MANAGER)
response = processor.get_metadata_list(train_id, tag)
return jsonify(response)
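# A sketch of a request to the endpoint above (the URL prefix comes from
# settings, and the train_id/tag values are illustrative placeholders):
#
#   GET <url_path_prefix>/<api_prefix>/datavisual/image/metadata?train_id=<id>&tag=<tag>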
@BLUEPRINT.route("/datavisual/image/single-image", methods=["GET"])
def single_image():
"""
Interface to fetch raw image data for a particular image.
Returns:
Response, which contains a byte string of image.
"""
tag = request.args.get("tag")
step = request.args.get("step")
train_id = get_train_id(request)
processor = ImageProcessor(DATA_MANAGER)
img_data = processor.get_single_image(train_id, tag, step)
return img_data
@BLUEPRINT.route("/datavisual/scalar/metadata", methods=["GET"])
def scalar_metadata():
"""
Interface to fetch metadata about the scalars for the particular run and tag.
Returns:
Response, which contains a list in JSON containing scalar events, each
one of which is an object containing items' wall_time, step and value.
"""
tag = request.args.get("tag")
train_id = get_train_id(request)
processor = ScalarsProcessor(DATA_MANAGER)
response = processor.get_metadata_list(train_id, tag)
metadatas = response['metadatas']
for metadata in metadatas:
value = metadata.get("value")
metadata["value"] = if_nan_inf_to_none('scalar_value', value)
return jsonify(response)
@BLUEPRINT.route("/datavisual/graphs/nodes", methods=["GET"])
def graph_nodes():
"""
Interface to get graph nodes.
Returns:
Response, which contains a JSON object.
"""
name = request.args.get('name', default=None)
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
graph_process = GraphProcessor(train_id, DATA_MANAGER, tag)
response = graph_process.list_nodes(scope=name)
return jsonify(response)
@BLUEPRINT.route("/datavisual/graphs/nodes/names", methods=["GET"])
def graph_node_names():
"""
Interface to query node names.
Returns:
Response, which contains a JSON object.
"""
search_content = request.args.get("search")
offset = request.args.get("offset", default=0)
limit = request.args.get("limit", default=100)
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
graph_process = GraphProcessor(train_id, DATA_MANAGER, tag)
resp = graph_process.search_node_names(search_content, offset, limit)
return jsonify(resp)
@BLUEPRINT.route("/datavisual/graphs/single-node", methods=["GET"])
def graph_search_single_node():
"""
Interface to search single node.
Returns:
Response, which contains a JSON object.
"""
name = request.args.get("name")
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
graph_process = GraphProcessor(train_id, DATA_MANAGER, tag)
resp = graph_process.search_single_node(name)
return jsonify(resp)
@BLUEPRINT.route("/datavisual/histograms", methods=["GET"])
def histogram():
"""
Interface to obtain histogram data.
Returns:
Response, which contains a JSON object.
"""
tag = request.args.get("tag", default=None)
train_id = get_train_id(request)
processor = HistogramProcessor(DATA_MANAGER)
response = processor.get_histograms(train_id, tag)
return jsonify(response)
@BLUEPRINT.route("/datavisual/scalars", methods=["GET"])
def get_scalars():
"""Get scalar data for given train_ids and tags."""
train_ids = request.args.getlist('train_id')
tags = request.args.getlist('tag')
processor = ScalarsProcessor(DATA_MANAGER)
scalars = processor.get_scalars(train_ids, tags)
return jsonify({'scalars': scalars})
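# The endpoint above reads repeated query parameters via getlist, e.g.
# (placeholder values):
#
#   GET .../datavisual/scalars?train_id=<id1>&train_id=<id2>&tag=<tag>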
def init_module(app):
"""
Init module entry.
Args:
app (Flask): The application obj.
"""
app.register_blueprint(BLUEPRINT)
| [
"[email protected]"
] | |
904accd2539767b15763cd55082659294465b998 | a2e11ec88ef3c83b9f07129e76a3681a676d164f | /demo8apr/testapp/urls.py | 612a33b0ec158d2795dc24c6b407ab4fabc9dc74 | [] | no_license | qwertypool/lofo | dadd7cd5b149a3a200b7111d803b1d0195d76642 | 3bc7bd125e7ea5a67f51dd6dd654e38a5f218055 | refs/heads/master | 2022-05-18T09:31:11.456634 | 2020-04-18T14:47:44 | 2020-04-18T14:47:44 | 256,773,858 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from django.urls import path
from testapp import views
urlpatterns = [
path('form/',views.form_view,name='forms'),
path('thankyou/',views.thankyou_view,name='thankyou'),
path('list/',views.list_view,name='list'),
path('elist/',views.elist_view,name='elist'),
path('eform/',views.eform_view,name='eform'),
path('demo/',views.demo_view,name='demo'),
] | [
"[email protected]"
] | |
825f6ccaee5f5912163c36e767b88ed23e0e1a49 | 24fe1f54fee3a3df952ca26cce839cc18124357a | /servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/snmp/src.py | b6b8d307181045b63a79b7469c70f940f94421de | [] | no_license | aperiyed/servicegraph-cloudcenter | 4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff | 9eb7975f2f6835e1c0528563a771526896306392 | refs/heads/master | 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 | Python | UTF-8 | Python | false | false | 8,786 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class Src(Mo):
"""
    The SNMP source profile determines the fault information, severity level, and destination for sending messages to the SNMP destination. SNMP is an application-layer protocol that provides a message format for communication between SNMP managers and agents. SNMP provides a standardized framework and a common language used for the monitoring and management of devices in a network.
"""
meta = ClassMeta("cobra.model.snmp.Src")
meta.moClassName = "snmpSrc"
meta.rnFormat = "snmpsrc-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "SNMP Source"
meta.writeAccessMask = 0x1
meta.readAccessMask = 0x800000000000001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.tag.Tag")
meta.childClasses.add("cobra.model.fault.Counts")
meta.childClasses.add("cobra.model.health.Inst")
meta.childClasses.add("cobra.model.mon.MonObjDn")
meta.childClasses.add("cobra.model.snmp.RsDestGroup")
meta.childClasses.add("cobra.model.aaa.RbacAnnotation")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childClasses.add("cobra.model.tag.Annotation")
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Annotation", "annotationKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.snmp.RsDestGroup", "rsdestGroup"))
meta.childNamesAndRnPrefix.append(("cobra.model.mon.MonObjDn", "monobjdn-"))
meta.childNamesAndRnPrefix.append(("cobra.model.aaa.RbacAnnotation", "rbacDom-"))
meta.childNamesAndRnPrefix.append(("cobra.model.tag.Tag", "tagKey-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.mon.CommonPol")
meta.parentClasses.add("cobra.model.mon.InfraPol")
meta.parentClasses.add("cobra.model.mon.EPGTarget")
meta.parentClasses.add("cobra.model.mon.FabricPol")
meta.parentClasses.add("cobra.model.event.SevAsnP")
meta.parentClasses.add("cobra.model.mon.EPGPol")
meta.parentClasses.add("cobra.model.mon.InfraTarget")
meta.parentClasses.add("cobra.model.fault.SevAsnP")
meta.parentClasses.add("cobra.model.mon.FabricTarget")
meta.superClasses.add("cobra.model.mon.Src")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Comp")
meta.rnPrefixes = [
('snmpsrc-', True),
]
prop = PropMeta("str", "annotation", "annotation", 37566, PropCategory.REGULAR)
prop.label = "Annotation. Suggested format orchestrator:value"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("annotation", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "extMngdBy", "extMngdBy", 39705, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "undefined"
prop._addConstant("msc", "msc", 1)
prop._addConstant("undefined", "undefined", 0)
meta.props.add("extMngdBy", prop)
prop = PropMeta("str", "incl", "incl", 16399, PropCategory.REGULAR)
prop.label = "Include Action"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 15
prop.defaultValueStr = "all"
prop._addConstant("all", "all", 15)
prop._addConstant("audit", "audit-logs", 4)
prop._addConstant("events", "events", 2)
prop._addConstant("faults", "faults", 1)
prop._addConstant("none", "none", 0)
prop._addConstant("session", "session-logs", 8)
meta.props.add("incl", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "minSev", "minSev", 1546, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 1
prop.defaultValueStr = "info"
prop._addConstant("cleared", "cleared", 0)
prop._addConstant("critical", "critical", 5)
prop._addConstant("info", "info", 1)
prop._addConstant("major", "major", 4)
prop._addConstant("minor", "minor", 3)
prop._addConstant("warning", "warning", 2)
meta.props.add("minSev", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "monPolDn", "monPolDn", 14168, PropCategory.REGULAR)
prop.label = "Monitoring policy attached to this observable object"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("monPolDn", prop)
prop = PropMeta("str", "name", "name", 7161, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
prop.label = "Name alias"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 63)]
prop.regex = ['[a-zA-Z0-9_.-]+']
meta.props.add("nameAlias", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "name"))
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Policy"
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
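# A minimal construction sketch (the parent DN and source name below are
# placeholders, not a real fabric configuration):
#
#   src = Src('uni/fabric/moncommon', 'my-snmp-src', minSev='warning')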
| [
"[email protected]"
] | |
32275f30d3edfcdfabbae11c0e0d3061a353a050 | 33f9056de72ea429774cdf42d3f813a4cd33a255 | /backend/takeout/admin/models/admin.py | 803776d2c167a36a3dde181c74e8d18c7d90e965 | [
"MIT"
] | permissive | alex159s/Take-out | a566e35d5c05c6e8456beb449c08b6c6479f4e79 | 27c66dcc4f0e045ae060255679a2aa68c0f744d2 | refs/heads/master | 2020-04-06T06:36:51.806309 | 2016-07-15T14:27:06 | 2016-07-15T14:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | # coding: utf-8
from lib.models.userbase import UserBase
class Admin(UserBase):
def to_string(self):
return {
"id": self.id,
"username": self.username,
"nickname": self.nickname,
}
def to_detail_string(self):
return self.to_string()
| [
"[email protected]"
] | |
d14f400d7cb6a38ec86427c746f0251aa6fa1c75 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/coverage-big-3721.py | dba02cd6001ae165033db26d67139aa3e7a13aa6 | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,348 | py | count:int = 0
count2:int = 0
count3:int = 0
count4:int = 0
count5:int = 0
def foo(s: str) -> int:
return len(s)
def foo2(s: str, s2: str) -> int:
return len(s)
def foo3(s: str, s2: str, s3: str) -> int:
return len(s)
def foo4(s: str, s2: str, s3: str, s4: str) -> int:
return len(s)
def foo5(s: str, s2: str, s3: str, s4: str, s5: str) -> int:
return len(s)
class bar(object):
p: bool = True
def baz(self:"bar", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar2(object):
p: bool = True
p2: bool = True
def baz(self:"bar2", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar2", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar3(object):
p: bool = True
p2: bool = True
p3: bool = True
def baz(self:"bar3", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar3", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar3", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar4(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
def baz(self:"bar4", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar4", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar4", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar4", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
class bar5(object):
p: bool = True
p2: bool = True
p3: bool = True
p4: bool = True
p5: bool = True
def baz(self:"bar5", xx: [int]) -> str:
global count
x:int = 0
y:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz2(self:"bar5", xx: [int], xx2: [int]) -> str:
global count
x:int = 0
x2:int = 0
y:int = 1
y2:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
                self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz3(self:"bar5", xx: [int], xx2: [int], xx3: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
y:int = 1
y2:int = 1
y3:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz4(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
def baz5(self:"bar5", xx: [int], xx2: [int], xx3: [int], xx4: [int], xx5: [int]) -> str:
global count
x:int = 0
x2:int = 0
x3:int = 0
x4:int = 0
x5:int = 0
y:int = 1
y2:int = 1
y3:int = 1
y4:int = 1
y5:int = 1
def qux(y: int) -> object:
nonlocal x
if x > y:
x = -1
def qux2(y: int, y2: int) -> object:
nonlocal x
nonlocal x2
if x > y:
x = -1
def qux3(y: int, y2: int, y3: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
if x > y:
x = -1
def qux4(y: int, y2: int, y3: int, y4: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
if x > y:
x = -1
def qux5(y: int, y2: int, y3: int, y4: int, y5: int) -> object:
nonlocal x
nonlocal x2
nonlocal x3
nonlocal x4
nonlocal x5
if x > y:
x = -1
for x in xx:
self.p = x == 2
qux(0) # Yay! ChocoPy
count = count + 1
while x <= 0:
if self.p:
xx[0] = xx[1]
self.p = not self.p
x = x + 1
elif foo("Long"[0]) == 1:
self.p = self is None
return "Nope"
print(bar().baz([1,2]))
| [
"[email protected]"
] | |
62d5c24f0840174a493b4036c242af7859a52887 | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/ICalGLib/__init__.py | 75c6283ac12886f54d25efafcb89d88f2ef41f10 | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 12,903 | py | # encoding: utf-8
# module gi.repository.ICalGLib
# from /usr/lib64/girepository-1.0/ICalGLib-3.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a Python-module-like representation of the typelib
using the gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gobject as __gobject
# Variables with simple values
_namespace = 'ICalGLib'
_version = '3.0'
__weakref__ = None
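# A minimal usage sketch (assumes the libical-glib typelib is installed on
# the system; the component constructor shown is illustrative):
#
#   import gi
#   gi.require_version('ICalGLib', '3.0')
#   from gi.repository import ICalGLib
#   comp = ICalGLib.Component.new_vcalendar()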
# functions
def bt(): # real signature unknown; restored from __doc__
""" bt() """
pass
def errno_return(): # real signature unknown; restored from __doc__
""" errno_return() -> ICalGLib.ErrorEnum """
pass
def error_clear_errno(): # real signature unknown; restored from __doc__
""" error_clear_errno() """
pass
def error_crash_here(): # real signature unknown; restored from __doc__
""" error_crash_here() """
pass
def error_get_error_state(error): # real signature unknown; restored from __doc__
""" error_get_error_state(error:ICalGLib.ErrorEnum) -> ICalGLib.ErrorState """
pass
def error_perror(): # real signature unknown; restored from __doc__
""" error_perror() -> str """
return ""
def error_restore(error, es): # real signature unknown; restored from __doc__
""" error_restore(error:str, es:ICalGLib.ErrorState) """
pass
def error_set_errno(x): # real signature unknown; restored from __doc__
""" error_set_errno(x:ICalGLib.ErrorEnum) """
pass
def error_set_error_state(error, state): # real signature unknown; restored from __doc__
""" error_set_error_state(error:ICalGLib.ErrorEnum, state:ICalGLib.ErrorState) """
pass
def error_stop_here(): # real signature unknown; restored from __doc__
""" error_stop_here() """
pass
def error_strerror(e): # real signature unknown; restored from __doc__
""" error_strerror(e:ICalGLib.ErrorEnum) -> str """
return ""
def error_supress(error): # real signature unknown; restored from __doc__
""" error_supress(error:str) -> ICalGLib.ErrorState """
pass
def get_unknown_token_handling_setting(): # real signature unknown; restored from __doc__
""" get_unknown_token_handling_setting() -> ICalGLib.Unknowntokenhandling """
pass
def memory_add_tmp_buffer(buf=None): # real signature unknown; restored from __doc__
""" memory_add_tmp_buffer(buf=None) """
pass
def memory_append_char(buf, pos, ch): # real signature unknown; restored from __doc__
""" memory_append_char(buf:list, pos:list, ch:int) -> buf:list, pos:list """
pass
def memory_append_string(buf, pos, p_str): # real signature unknown; restored from __doc__
""" memory_append_string(buf:list, pos:list, str:str) -> buf:list, pos:list """
pass
def memory_free_buffer(buf=None): # real signature unknown; restored from __doc__
""" memory_free_buffer(buf=None) """
pass
def memory_new_buffer(size): # real signature unknown; restored from __doc__
""" memory_new_buffer(size:int) """
pass
def memory_resize_buffer(buf, size): # real signature unknown; restored from __doc__ (a non-default arg may not follow a default one, so the stub drops `buf=None`)
""" memory_resize_buffer(buf=None, size:int) """
pass
def memory_strdup(s): # real signature unknown; restored from __doc__
""" memory_strdup(s:str) -> str """
return ""
def memory_tmp_buffer(size): # real signature unknown; restored from __doc__
""" memory_tmp_buffer(size:int) """
pass
def memory_tmp_copy(p_str): # real signature unknown; restored from __doc__
""" memory_tmp_copy(str:str) -> str """
return ""
def mime_parse(func, user_data=None): # real signature unknown; restored from __doc__
""" mime_parse(func:ICalGLib.MimeParseFunc, user_data=None) -> ICalGLib.Component """
pass
def recur_expand_recurrence(rule, start, count): # real signature unknown; restored from __doc__
""" recur_expand_recurrence(rule:str, start:int, count:int) -> list """
return []
def request_status_code(stat): # real signature unknown; restored from __doc__
""" request_status_code(stat:ICalGLib.RequestStatus) -> str """
return ""
def request_status_desc(stat): # real signature unknown; restored from __doc__
""" request_status_desc(stat:ICalGLib.RequestStatus) -> str """
return ""
def request_status_from_num(major, minor): # real signature unknown; restored from __doc__
""" request_status_from_num(major:int, minor:int) -> ICalGLib.RequestStatus """
pass
def request_status_major(stat): # real signature unknown; restored from __doc__
""" request_status_major(stat:ICalGLib.RequestStatus) -> int """
return 0
def request_status_minor(stat): # real signature unknown; restored from __doc__
""" request_status_minor(stat:ICalGLib.RequestStatus) -> int """
return 0
def restriction_check(comp): # real signature unknown; restored from __doc__
""" restriction_check(comp:ICalGLib.Component) -> int """
return 0
def restriction_compare(restr, count): # real signature unknown; restored from __doc__
""" restriction_compare(restr:ICalGLib.RestrictionKind, count:int) -> int """
return 0
def set_unknown_token_handling_setting(newSetting): # real signature unknown; restored from __doc__
""" set_unknown_token_handling_setting(newSetting:ICalGLib.Unknowntokenhandling) """
pass
def __delattr__(*args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(*args, **kwargs): # real signature unknown
pass
def __eq__(*args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(*args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(*args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __getattr__(*args, **kwargs): # real signature unknown
pass
def __ge__(*args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(*args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(*args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(*args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(*args, **kwargs): # real signature unknown
""" Might raise gi._gi.RepositoryError """
pass
def __le__(*args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(*args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(*args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(*args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(*args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(*args, **kwargs): # real signature unknown
pass
def __setattr__(*args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(*args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(*args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(*args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
# classes
from .Object import Object
from .Array import Array
from .ArrayClass import ArrayClass
from .Attach import Attach
from .AttachClass import AttachClass
from .CompIter import CompIter
from .CompIterClass import CompIterClass
from .Component import Component
from .ComponentClass import ComponentClass
from .ComponentKind import ComponentKind
from .Datetimeperiod import Datetimeperiod
from .DatetimeperiodClass import DatetimeperiodClass
from .Duration import Duration
from .DurationClass import DurationClass
from .ErrorEnum import ErrorEnum
from .ErrorState import ErrorState
from .Geo import Geo
from .GeoClass import GeoClass
from .ObjectClass import ObjectClass
from .ObjectPrivate import ObjectPrivate
from .Parameter import Parameter
from .ParameterAction import ParameterAction
from .ParameterClass import ParameterClass
from .ParameterCutype import ParameterCutype
from .ParameterEnable import ParameterEnable
from .ParameterEncoding import ParameterEncoding
from .ParameterFbtype import ParameterFbtype
from .ParameterKind import ParameterKind
from .ParameterLocal import ParameterLocal
from .ParameterPartstat import ParameterPartstat
from .ParameterRange import ParameterRange
from .ParameterRelated import ParameterRelated
from .ParameterReltype import ParameterReltype
from .ParameterRequired import ParameterRequired
from .ParameterRole import ParameterRole
from .ParameterRsvp import ParameterRsvp
from .ParameterScheduleagent import ParameterScheduleagent
from .ParameterScheduleforcesend import ParameterScheduleforcesend
from .ParameterStayinformed import ParameterStayinformed
from .ParameterSubstate import ParameterSubstate
from .ParameterValue import ParameterValue
from .ParameterXliccomparetype import ParameterXliccomparetype
from .ParameterXlicerrortype import ParameterXlicerrortype
from .Parser import Parser
from .ParserClass import ParserClass
from .ParserState import ParserState
from .Period import Period
from .PeriodClass import PeriodClass
from .Property import Property
from .PropertyAction import PropertyAction
from .PropertyBusytype import PropertyBusytype
from .PropertyCarlevel import PropertyCarlevel
from .PropertyClass import PropertyClass
from .PropertyCmd import PropertyCmd
from .PropertyKind import PropertyKind
from .PropertyMethod import PropertyMethod
from .PropertyPollcompletion import PropertyPollcompletion
from .PropertyPollmode import PropertyPollmode
from .PropertyQuerylevel import PropertyQuerylevel
from .PropertyStatus import PropertyStatus
from .PropertyTaskmode import PropertyTaskmode
from .PropertyTransp import PropertyTransp
from .PropertyXlicclass import PropertyXlicclass
from .Property_Class import Property_Class
from .RecurIterator import RecurIterator
from .RecurIteratorClass import RecurIteratorClass
from .Recurrence import Recurrence
from .RecurrenceArrayMaxValues import RecurrenceArrayMaxValues
from .RecurrenceArraySizes import RecurrenceArraySizes
from .RecurrenceClass import RecurrenceClass
from .RecurrenceFrequency import RecurrenceFrequency
from .RecurrenceSkip import RecurrenceSkip
from .RecurrenceWeekday import RecurrenceWeekday
from .Reqstat import Reqstat
from .ReqstatClass import ReqstatClass
from .RequestStatus import RequestStatus
from .RestrictionKind import RestrictionKind
from .Time import Time
from .TimeClass import TimeClass
from .TimeSpan import TimeSpan
from .TimeSpanClass import TimeSpanClass
from .Timezone import Timezone
from .TimezoneClass import TimezoneClass
from .Trigger import Trigger
from .TriggerClass import TriggerClass
from .Unknowntokenhandling import Unknowntokenhandling
from .Value import Value
from .ValueClass import ValueClass
from .ValueKind import ValueKind
from ._Array import _Array
from ._Attach import _Attach
from ._CompIter import _CompIter
from ._Component import _Component
from ._Datetimeperiod import _Datetimeperiod
from ._Duration import _Duration
from ._Geo import _Geo
from ._Parameter import _Parameter
from ._Parser import _Parser
from ._Period import _Period
from ._Property import _Property
from ._RecurIterator import _RecurIterator
from ._Recurrence import _Recurrence
from ._Reqstat import _Reqstat
from ._Time import _Time
from ._TimeSpan import _TimeSpan
from ._Timezone import _Timezone
from ._Trigger import _Trigger
from ._Value import _Value
from .__class__ import __class__
# variables with complex values
__loader__ = None # (!) real value is '<gi.importer.DynamicImporter object at 0x7f13551b1d00>'
__path__ = [
'/usr/lib64/girepository-1.0/ICalGLib-3.0.typelib',
]
__spec__ = None # (!) real value is "ModuleSpec(name='gi.repository.ICalGLib', loader=<gi.importer.DynamicImporter object at 0x7f13551b1d00>)"
| [
"[email protected]"
] | |
fe08de446db91af310d979c631ab7fd26537e457 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_121/ch9_2020_08_31_17_12_51_131522.py | f0d648d01a9c7d5c599714f7d8705d12d95bfed3 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | import math
def calcula_volume_da_esfera(r):
volume_esfera = 4 / 3 * math.pi * r ** 3
return volume_esfera | [
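
# --- Editor's note (addition, not part of the original submission) ---
# Worked check of the formula V = 4/3 * pi * r**3: for r = 1,
#     calcula_volume_da_esfera(1) ~= 4.188790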
"[email protected]"
] | |
25bec75d335fd19663fb549bac3f111228adcee2 | f3e51466d00510f1dae58f1cb87dd53244ce4e70 | /LeetCodes/facebook/ReverseLinkedList.py | 9184e30348f7f6739d047928f0e48937973b5b12 | [] | no_license | chutianwen/LeetCodes | 40d18e7aa270f8235342f0485bfda2bd1ed960e1 | 11d6bf2ba7b50c07e048df37c4e05c8f46b92241 | refs/heads/master | 2022-08-27T10:28:16.594258 | 2022-07-24T21:23:56 | 2022-07-24T21:23:56 | 96,836,652 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 959 | py | '''
Reverse a singly linked list.
Example:
Input: 1->2->3->4->5->NULL
Output: 5->4->3->2->1->NULL
Follow up:
A linked list can be reversed either iteratively or recursively. Could you implement both?
'''
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def reverseListIterative(self, head):
new_head = None
while head:
            # Watch out: the ordering below is wrong! Tuple targets are
            # assigned left to right, so `head` is rebound to `head.next`
            # first; `head.next = new_head` then touches the wrong node
            # (and raises once `head` becomes None).
            # new_head, head, head.next = head, head.next, new_head
head.next, new_head, head = new_head, head, head.next
return new_head
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
if not head or not head.next: return head
else:
node = self.reverseList(head.next)
            head.next.next = head
head.next = None
return node | [
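
# --- Editor's sketch (addition, not part of the original solution) ---
# Minimal usage example for the classes above: build 1->2->3 and reverse it.
def _demo_reverse():
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    node = Solution().reverseListIterative(head)
    out = []
    while node:
        out.append(node.val)
        node = node.next
    print(out)  # expected: [3, 2, 1]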
"[email protected]"
] | |
d9557a94d789d81754209070b91182d44bae2261 | 20f951bd927e4e5cde8ef7781813fcf0d51cc3ea | /fossir/modules/events/settings.py | 45ce014e97b50016de412b5d7b4e562a48cc8776 | [] | no_license | HodardCodeclub/SoftwareDevelopment | 60a0fbab045cb1802925d4dd5012d5b030c272e0 | 6300f2fae830c0c2c73fe0afd9c684383bce63e5 | refs/heads/master | 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,847 | py |
from __future__ import unicode_literals
import os
import re
from functools import wraps
import yaml
from flask.helpers import get_root_path
from fossir.core import signals
from fossir.core.settings import ACLProxyBase, SettingProperty, SettingsProxyBase
from fossir.core.settings.converters import DatetimeConverter
from fossir.core.settings.util import get_all_settings, get_setting, get_setting_acl
from fossir.modules.events.models.settings import EventSetting, EventSettingPrincipal
from fossir.util.caching import memoize
from fossir.util.signals import values_from_signal
from fossir.util.user import iter_acl
def event_or_id(f):
@wraps(f)
def wrapper(self, event, *args, **kwargs):
from fossir.modules.events import Event
if isinstance(event, Event):
event = event.id
return f(self, int(event), *args, **kwargs)
return wrapper
class EventACLProxy(ACLProxyBase):
"""Proxy class for event-specific ACL settings"""
@event_or_id
def get(self, event, name):
"""Retrieves an ACL setting
:param event: Event (or its ID)
:param name: Setting name
"""
self._check_name(name)
return get_setting_acl(EventSettingPrincipal, self, name, self._cache, event_id=event)
@event_or_id
def set(self, event, name, acl):
"""Replaces an ACL with a new one
:param event: Event (or its ID)
:param name: Setting name
:param acl: A set containing principals (users/groups)
"""
self._check_name(name)
EventSettingPrincipal.set_acl(self.module, name, acl, event_id=event)
self._flush_cache()
@event_or_id
def contains_user(self, event, name, user):
"""Checks if a user is in an ACL.
To pass this check, the user can either be in the ACL itself
or in a group in the ACL.
:param event: Event (or its ID)
:param name: Setting name
:param user: A :class:`.User`
"""
return any(user in principal for principal in iter_acl(self.get(event, name)))
@event_or_id
def add_principal(self, event, name, principal):
"""Adds a principal to an ACL
:param event: Event (or its ID)
:param name: Setting name
:param principal: A :class:`.User` or a :class:`.GroupProxy`
"""
self._check_name(name)
EventSettingPrincipal.add_principal(self.module, name, principal, event_id=event)
self._flush_cache()
@event_or_id
def remove_principal(self, event, name, principal):
"""Removes a principal from an ACL
:param event: Event (or its ID)
:param name: Setting name
:param principal: A :class:`.User` or a :class:`.GroupProxy`
"""
self._check_name(name)
EventSettingPrincipal.remove_principal(self.module, name, principal, event_id=event)
self._flush_cache()
def merge_users(self, target, source):
"""Replaces all ACL user entries for `source` with `target`"""
EventSettingPrincipal.merge_users(self.module, target, source)
self._flush_cache()
class EventSettingsProxy(SettingsProxyBase):
"""Proxy class to access event-specific settings for a certain module"""
acl_proxy_class = EventACLProxy
@property
def query(self):
"""Returns a query object filtering by the proxy's module."""
return EventSetting.find(module=self.module)
@event_or_id
def get_all(self, event, no_defaults=False):
"""Retrieves all settings
:param event: Event (or its ID)
:param no_defaults: Only return existing settings and ignore defaults.
:return: Dict containing the settings
"""
return get_all_settings(EventSetting, EventSettingPrincipal, self, no_defaults, event_id=event)
@event_or_id
def get(self, event, name, default=SettingsProxyBase.default_sentinel):
"""Retrieves the value of a single setting.
:param event: Event (or its ID)
:param name: Setting name
:param default: Default value in case the setting does not exist
:return: The settings's value or the default value
"""
self._check_name(name)
return get_setting(EventSetting, self, name, default, self._cache, event_id=event)
@event_or_id
def set(self, event, name, value):
"""Sets a single setting.
:param event: Event (or its ID)
:param name: Setting name
:param value: Setting value; must be JSON-serializable
"""
self._check_name(name)
EventSetting.set(self.module, name, self._convert_from_python(name, value), event_id=event)
self._flush_cache()
@event_or_id
def set_multi(self, event, items):
"""Sets multiple settings at once.
:param event: Event (or its ID)
:param items: Dict containing the new settings
"""
items = {k: self._convert_from_python(k, v) for k, v in items.iteritems()}
self._split_call(items,
lambda x: EventSetting.set_multi(self.module, x, event_id=event),
lambda x: EventSettingPrincipal.set_acl_multi(self.module, x, event_id=event))
self._flush_cache()
@event_or_id
def delete(self, event, *names):
"""Deletes settings.
:param event: Event (or its ID)
:param names: One or more names of settings to delete
"""
self._split_call(names,
lambda name: EventSetting.delete(self.module, *name, event_id=event),
lambda name: EventSettingPrincipal.delete(self.module, *name, event_id=event))
self._flush_cache()
@event_or_id
def delete_all(self, event):
"""Deletes all settings.
:param event: Event (or its ID)
"""
EventSetting.delete_all(self.module, event_id=event)
EventSettingPrincipal.delete_all(self.module, event_id=event)
self._flush_cache()
class EventSettingProperty(SettingProperty):
attr = 'event'
class ThemeSettingsProxy(object):
@property
@memoize
def settings(self):
core_path = os.path.join(get_root_path('fossir'), 'modules', 'events', 'themes.yaml')
with open(core_path) as f:
core_data = f.read()
core_settings = yaml.safe_load(core_data)
# YAML doesn't give us access to anchors so we need to include the base yaml.
# Since duplicate keys are invalid (and may start failing in the future) we
# rename them - this also makes it easy to throw them away after parsing the
# file provided by a plugin.
core_data = re.sub(r'^(\S+:)$', r'__core_\1', core_data, flags=re.MULTILINE)
for plugin, path in values_from_signal(signals.plugin.get_event_themes_files.send(), return_plugins=True):
with open(path) as f:
data = f.read()
settings = {k: v
for k, v in yaml.safe_load(core_data + '\n' + data).viewitems()
if not k.startswith('__core_')}
# We assume there's no more than one theme plugin that provides defaults.
# If that's not the case the last one "wins". We could reject this but it
# is quite unlikely that people have multiple theme plugins in the first
# place, even more so theme plugins that specify defaults.
core_settings['defaults'].update(settings.get('defaults', {}))
# Same for definitions - we assume plugin authors are responsible enough
# to avoid using definition names that are likely to cause collisions.
# Either way, if someone does this on purpose chances are good they want
# to override a default style so let them do so...
for name, definition in settings.get('definitions', {}).viewitems():
definition['plugin'] = plugin
core_settings['definitions'][name] = definition
return core_settings
@property
@memoize
def themes(self):
return self.settings['definitions']
@property
@memoize
def defaults(self):
return self.settings['defaults']
@memoize
def get_themes_for(self, event_type):
return {theme_id: theme_data for theme_id, theme_data in self.themes.viewitems()
if event_type in theme_data['event_types']}
event_core_settings = EventSettingsProxy('core', {
'start_dt_override': None,
'end_dt_override': None,
'organizer_info': '',
'additional_info': ''
}, converters={
'start_dt_override': DatetimeConverter,
'end_dt_override': DatetimeConverter
})
event_contact_settings = EventSettingsProxy('contact', {
'title': 'Contact',
'emails': [],
'phones': []
})
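
# --- Editor's sketch (addition, not part of the original module) ---
# Hypothetical usage of the proxies above, assuming an `event` instance
# (or event ID); the keys mirror the defaults declared in this file:
#
#     info = event_core_settings.get(event, 'organizer_info')
#     event_contact_settings.set(event, 'title', 'Organising committee')
#     event_contact_settings.set_multi(event, {'emails': ['pc@example.com']})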
| [
"[email protected]"
] | |
8e34a2e597940d4b033ab178c33dd25722a67540 | fc25d8a6d20b2b9dd78df754b53067e4a6c255d8 | /step01a_seaIce_monthlyaveraging_RCP45.py | eaf54dd5e6d74de9ac04cfb70b5d4a8a03153cd3 | [] | no_license | weilin2018/cesmEnsembleSeaIce | 5195bbec3b4c717d4a990e814b0a8aefa1b46857 | 7dfeac61d421951560b1562c107ffd3b72a292c9 | refs/heads/master | 2020-09-04T19:31:53.099776 | 2015-09-29T17:33:19 | 2015-09-29T17:33:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,864 | py | # code written by K. Barnhart in 2014 and 2015 in support of
# Barnhart et al., TITLE, YEAR
#
# this code represents the first step in analysing the CESMLE sea ice output
# it creates monthly averages of the sea ice concentration and sea ice extent.
#
# to run it, please verify that all the modules listed below are installed on
# your machine and change the input and output file paths.
print 'importing modules'
import nio
import numpy as np
from netCDF4 import Dataset
from datetime import datetime
import os
import glob
# set paths for input and output folder
# input folder should contain all of the aice_d files from the CESM-LE
# for users of Yellowstone/glade, this file is located at
# glade/p/cesm0005/CESM-CAM5-BGC-LE/ice/proc/tseries/daily/aice_d/
pathOut=u'/Volumes/Pitcairn/seaicePPF/northernHemisphere/analysisOutput/'
pathIn=u'/Volumes/Pitcairn/seaicePPF/p/cesm0005/CESM-CAM5-BGC-LE/ice/proc/tseries/daily/aice_d/'
pathIn45=u'/Volumes/Pitcairn/seaicePPF/p/cesm0005/CESM-CAM5-BGC-ME/ice/proc/tseries/daily/aice_d/'
# find all files (the 'nh' tag in the filenames limits us to just the northern hemisphere)
dirList=glob.glob(pathIn45+'b*h*.nc')
# hard code in the names for the run parts
bgKey=u'B1850C5CN'
runPart1key=u'B20TRC5CNBDRD'
runParts23key=u'BRCP85C5CNBDRD'
runParts23keyRCP45=u'BRCP45C5CNBDRD'
owThresh=15 # sea ice concentration for "open Water"
daysPerMonth=[31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
cumDaysInYear=np.cumsum(daysPerMonth)
## select unique runs (for RCP 4.5 remove this because all are unique)
#uniqueRuns=[]
#for fn in dirList:
# pathSplit=fn.split('/')
# fnSplit=pathSplit[-1].split('.')
# if fnSplit[2]==bgKey: # if the control run
# uniqueRuns.append(fn)
# if fnSplit[2]==runPart1key:
# uniqueRuns.append(fn)
uniqueRuns=dirList
# make sure the monthly averages do not already exist
fileNames=[]
for fn in uniqueRuns:
pathSplit=fn.split('/')
fnSplit=pathSplit[-1].split('.')
if fnSplit[2]==bgKey: # if the control run
fnTemp=str.join(".", fnSplit[:-1])+'.timeseries.nc'
#if fnSplit[2]==runPart1key:
# fnSplit[2]=runPart1key+'-'+runParts23keyRCP45
# fnTemp=str.join(".", fnSplit[:-2])+'.timeseries.nc'
if fnSplit[2]==runParts23keyRCP45:
fnSplit[2]=runPart1key+'-'+runParts23keyRCP45
fnTemp=str.join(".", fnSplit[:-2])+'.timeseries.nc'
fn_monthOut=pathOut+fnTemp[:-2]+'monthlyAvg.nc'
if os.path.isfile(fn_monthOut)==False:
fileNames.append(fn)
print fn_monthOut
# process the remaining input files.
startTime = datetime.now()
for fn in fileNames:
try:
print 'Starting averaging of', fn
pathSplit=fn.split('/')
fnSplit=pathSplit[-1].split('.')
# open files, get aice_d, and time and put into variables with the same name
if fnSplit[2]==bgKey: # if the control run
fnTemp=str.join(".", fnSplit[:-1])+'.timeseries.nc'
f=nio.open_file(fn, 'r')
aice_d=f.variables['aice_d'][:,:,:]
time=f.variables['time'][:]
if fnSplit[2]==runParts23keyRCP45:
# now instead you need to open THREE FILES:
# first open the first one
f=nio.open_file(fn, 'r')
aice_d=f.variables['aice_d'][:,:,:]
time=f.variables['time'][:]
# now find the other two and add them to the end
pathConstruct2=pathIn45+'*'+runParts23keyRCP45+'*'+fnSplit[4]+'*'+fnSplit[7]+'*nc'
findFiles=np.sort(glob.glob(pathConstruct2))
for fother in findFiles:
print fother
f1=nio.open_file(fother, 'r')
time=np.append(time,f1.variables['time'][:], 0)
aice_d=np.append(aice_d, f1.variables['aice_d'][:,:,:], 0)
del f1
fnSplit[2]=runPart1key+'-'+runParts23keyRCP45
fnTemp=str.join(".", fnSplit[:-2])+'.timeseries.nc'
fn_monthOut=pathOut+fnTemp[:-2]+'monthlyAvg.nc'
# calculate the number of years and get the x-y dimension
numyears=time.size/365
ni=f.dimensions['ni']
nj=f.dimensions['nj']
area=f.variables['tarea'][:,:]
nummonths=numyears*12
    # for each month, do the following: create monthly averages of SIC and sea ice area
# initialize output
monthlyTimestep=np.zeros(numyears*12)
monthlyTimeBounds=np.zeros((numyears*12,2))
monthlyAvgSIC=np.zeros((numyears*12, nj, ni))
monthlyAvgSIA=np.zeros((numyears*12, 1))
monthlyAvgSIA_thresh=np.zeros((numyears*12, 1))
monthlyTimestep=monthlyTimestep.astype(np.float32)
monthlyTimeBounds=monthlyTimeBounds.astype(np.float32)
monthlyAvgSIC=monthlyAvgSIC.astype(np.float32)
monthlyAvgSIA=monthlyAvgSIA.astype(np.float32)
monthlyAvgSIA_thresh=monthlyAvgSIA_thresh.astype(np.float32)
# loop through each month.
arrayItter=0
for i in range(numyears):
#print 'year = ',i+int(t[0]/365)
for j in range(12):
indStart=int(i*365+np.remainder(cumDaysInYear[j-1], 365))
indStop=int(i*365+cumDaysInYear[j]-1)
# select the timestamps and ice concentrations for the current month
selt=time[indStart:indStop]
selIce=aice_d[indStart:indStop,:,:]
# calculate the mean ice concentration
meanIce=selIce.mean(axis=0)
meanIce[selIce[0,:,:]==f.variables['aice_d'].__dict__['_FillValue']]=f.variables['aice_d'].__dict__['_FillValue']
monthlyAvgSIC[arrayItter,:,:]=meanIce
# calculate the mean ice extent
sia=[]
sia_thresh=[]
conversion=1000000.*1000000.
for jj in range(selIce.shape[0]):
selice2=selIce[jj,:,:]
selice2[selice2>1e29]=0
temparea=area
temparea[temparea>1e29]=0
temparea=temparea/conversion
temparea[selice2==0]=0
sia.append((np.sum((selice2/100.)*(temparea))))
sia_thresh.append((np.sum((selice2>owThresh)*(temparea))))
# save values into output structures.
            monthlyAvgSIA[arrayItter] = np.mean(sia) # concentration is stored as a percentage (100, not 1)
monthlyAvgSIA_thresh[arrayItter] = np.mean(sia_thresh)
monthlyTimestep[arrayItter]=round(selt.mean(axis=0))
monthlyTimeBounds[arrayItter, 0]=selt.min(axis=0)
monthlyTimeBounds[arrayItter, 1]=selt.max(axis=0)
arrayItter+=1
del time
del aice_d
## Create this monthly averaged file as a new netcdf
fMonth=Dataset(fn_monthOut, 'w',format='NETCDF4')
# create all the dimentions, set time to unlimited
for k in f.dimensions.keys():
if f.unlimited(k)==True:
fMonth.createDimension(k, monthlyTimestep.size)#None)
else:
fMonth.createDimension(k, f.dimensions[k])
print k, f.dimensions[k]
# use the netCDF4 instead of pyNIO since it seems to work much better with unlimited variables
fMonthVars={}
for key in {'TLAT', 'TLON','latt_bounds','lont_bounds','time_bounds', 'time'}:
#print 'creating ', key
# the netCDF4 module requires that if a fill value exists, it must be declared when the variable is created.
try:
fMonthVars[key]=fMonth.createVariable(key, f.variables[key].typecode(), f.variables[key].dimensions, fill_value=f.variables[key].__dict__['_FillValue'])
except:
fMonthVars[key]=fMonth.createVariable(key, f.variables[key].typecode(), f.variables[key].dimensions,fill_value=f.variables['aice_d'].__dict__['_FillValue'])
# sett all the attribute keys.
atts = f.variables[key].__dict__
for attKey in atts.keys():
if attKey != '_FillValue':
setattr(fMonth.variables[key],attKey,atts[attKey])
        # create the monthly averaged sea ice variable
monthAvgKey='aice_d_monthAvg'
fMonthVars[monthAvgKey]=fMonth.createVariable(monthAvgKey, f.variables['aice_d'].typecode(), f.variables['aice_d'].dimensions,fill_value=f.variables['aice_d'].__dict__['_FillValue'])
#print 'creating aice_d_monthAvg'
atts = f.variables['aice_d'].__dict__
for attKey in atts.keys():
            if attKey != '_FillValue':  # was `is not`, an identity (not equality) check
setattr(fMonth.variables[monthAvgKey],attKey,atts[attKey])
setattr(fMonth.variables[monthAvgKey],'long_name','mean monthly sea ice concentration (aggregate)')
monthAvgKey='monthlyAvgSIA'
fMonthVars[monthAvgKey]=fMonth.createVariable(monthAvgKey, f.variables['aice_d'].typecode(), 'time',fill_value=f.variables['aice_d'].__dict__['_FillValue'])
#print 'creating aice_d_monthAvg'
atts = f.variables['aice_d'].__dict__
for attKey in atts.keys():
            if attKey != '_FillValue':
setattr(fMonth.variables[monthAvgKey],attKey,atts[attKey])
setattr(fMonth.variables[monthAvgKey],'long_name','mean monthly ice area (aggregate)')
setattr(fMonth.variables[monthAvgKey],'units','million square kilometers')
monthAvgKey='monthlyAvgSIA_thresh'
fMonthVars[monthAvgKey]=fMonth.createVariable(monthAvgKey, f.variables['aice_d'].typecode(), 'time',fill_value=f.variables['aice_d'].__dict__['_FillValue'])
#print 'creating aice_d_monthAvg'
atts = f.variables['aice_d'].__dict__
for attKey in atts.keys():
            if attKey != '_FillValue':
setattr(fMonth.variables[monthAvgKey],attKey,atts[attKey])
setattr(fMonth.variables[monthAvgKey],'long_name','mean monthly ice area (using 15% threshold) (aggregate)')
setattr(fMonth.variables[monthAvgKey],'units','million square kilometers')
# put data into variables, first the ones we are copying over.
#print 'putting data into standard variables'
for key in {'TLAT', 'TLON','latt_bounds','lont_bounds'}:
fMonthVars[key][:,:]=f.variables[key][:]
        # now, the ones we have created (those with time as a dimension)
fMonthVars['time'][:]=monthlyTimestep
fMonthVars['time_bounds'][:,:]=monthlyTimeBounds
fMonthVars['aice_d_monthAvg'][:,:,:]=monthlyAvgSIC
fMonthVars['monthlyAvgSIA'][:]=monthlyAvgSIA
fMonthVars['monthlyAvgSIA_thresh'][:]=monthlyAvgSIA_thresh
# close and delete the output netCDF variable, retain f, so we can do the next part of the analysis.
fMonth.close()
del fMonth
        print 'finished averaging of ', fn, (datetime.now()-startTime)
except:
print 'oops... ', fn, 'didnt run' , (datetime.now()-startTime) | [
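
# --- Editor's sketch (addition, not part of the original script) ---
# The two area metrics used above, restated on a toy 2x2 grid so the
# formulas are easy to check by hand: sea ice area = sum(sic/100 * area),
# sea ice extent = sum(area where sic > 15%). Units are arbitrary here;
# the script itself converts tarea from m^2 to million km^2.
def _toy_sia_example():
    sic = np.array([[100., 50.], [10., 0.]])      # percent concentration
    cell_area = np.ones((2, 2))                   # one area unit per cell
    sia = np.sum((sic / 100.) * cell_area)        # -> 1.6
    extent = np.sum((sic > owThresh) * cell_area) # -> 2.0
    print('SIA=%.1f extent=%.1f' % (sia, extent))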
"[email protected]"
] | |
01dca87891811dc9c80df4ec667f35a0d253a385 | 2d997384a86f0d9c0cdb80a1141908abfdf674cc | /ML_homework8/task.py | 3a7d666193d64500051f875d33be8500c66db5a8 | [] | no_license | Alice-Avetisyan/Machine_Learning | 1ddc91fad066f3abf0457d036aa783f0fc40a46f | 9a0cc83c6d90ef58703a383f066ef857bb124334 | refs/heads/master | 2021-01-16T09:07:20.435898 | 2020-06-18T08:44:17 | 2020-06-18T08:44:17 | 243,054,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,678 | py | from sklearn.datasets import make_classification
# Generate a random n-class classification problem
X, y = make_classification(100, 3, 2, 1, class_sep=0.5) # 2 of 3 features are informative and 1 is redundant
# 100 -> number of samples/rows,
# 3 -> number of features/columns,
# 2 -> number of informative features,
# 1 -> number of redundant features (useless data)
# class_sep -> class separation; smaller values make the classes harder to separate
import matplotlib.pyplot as plt
# plt.hist(X[:, 1]) # all rows of the second column
# plt.show()
# plt.scatter(X[:, 0], X[:, 1])
# plt.show()
fig = plt.figure()
axis1 = fig.add_subplot(1, 2, 1)
axis1.hist(X[:, 1])
axis2 = fig.add_subplot(1, 2, 2)
axis2.scatter(X[:, 0], X[:, 1])
plt.show()
# plots the class distribution
for i in range(len(X)):
if y[i] == 0:
plt.scatter(X[i, 0], X[i, 1], marker='*', color='b')
else:
plt.scatter(X[i, 0], X[i, 1], marker='D', color='r')
plt.show()
from sklearn.svm import SVC
svc_model = SVC(kernel='rbf')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5, random_state=101)
svc_model.fit(X_train, y_train)
from sklearn.metrics import accuracy_score
y_pred = svc_model.predict(X_test)
print("Model Accuracy: ", accuracy_score(y_test, y_pred))
# converting the data into DataFrame
import pandas as pd
custom_df = pd.DataFrame(X, columns=['X1', 'X2', 'X3'])
custom_df.insert(len(custom_df.columns), 'y', pd.DataFrame(y))
print(custom_df)
# turning the data into a csv file
custom_df.to_csv('custom_data.csv', index=False)
csv = pd.read_csv('custom_data.csv')
print(csv) | [
"[email protected]"
] | |
9e30f6dd5e43574bbc8c96b3976c5ed164f00864 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/vehicle_systems/components/__init__.py | 057145a5b192b33ecbef57d0bc595f52f45d7640 | [] | no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 132 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/vehicle_systems/components/__init__.py
pass
| [
"[email protected]"
] | |
10ebb5a33de3a78479eeeeab0075a4ed9b9b5b16 | bcb56cc126ea1885eb5ecc920884e2e331def045 | /Part B/Letter.py | 5536de9080655ae9b4a8c2bdfc7f925462a11551 | [] | no_license | priyanshkedia04/Codeforces-Solutions | 2d11cb7b8329fe658f983b7212c17fc89fd784f0 | a5197c633bf4c3238f48bfb5b308144c2ffba473 | refs/heads/main | 2023-06-06T13:10:13.787843 | 2021-07-01T14:06:52 | 2021-07-01T14:06:52 | 382,000,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | from collections import Counter
s1 = dict(Counter(list(input())))
s2 = dict(Counter(list(input())))
count = 0
if ' ' in s2:
del s2[' ']
for i in s2:
    if i in s1:
if s1[i] >= s2[i]:
count += 1
if count == len(s2):
print("YES")
else:
print("NO") | [
"[email protected]"
] | |
dab8b347c67f8225bb55fa6570fe28846ab87f79 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/101/usersdata/227/49714/submittedfiles/av1_m3.py | 24617831cbcfc8ae60100af176c4d6c910776e9e | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | # -*- coding: utf-8 -*-
import math
m=int(input('digite o número de termos:'))
a=4
pi=3
sinal=1
# Nilakantha series: pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...
# (the original line had an unbalanced parenthesis and overwrote pi on
# every pass instead of accumulating the alternating terms)
for i in range(2,m+1,2):
    b=i+1
    c=b+1
    pi=pi+sinal*(a/(i*b*c))
    sinal=-sinal
print('%.6f'%pi)
| [
"[email protected]"
] | |
2d87cfb2050dc123dda3a9e59dec3db88a7322cd | cf03974cf92b11db7c52d7aec87b4c856feba215 | /hungry.py | 7a043473d6bca288b13395ad5e69c900846611e1 | [] | no_license | kkirankumar9/test | 80164594d4f1df97effa3df8b7a19e8f79817c76 | 166e77e17675bfa873c38ade145aa115a786be55 | refs/heads/main | 2023-06-21T10:27:55.815853 | 2021-07-31T13:59:12 | 2021-07-31T13:59:12 | 387,144,634 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | i=input("are you hungry")
if i=='yes':
print("eat pizza")
else:
print("do work") | [
"[email protected]"
] | |
f1089e8283bf6ef3416226ce279c6d214d5d825b | 5ff12b8402f45a945ef1d33235ecb9c85fb20fe1 | /assignment2/cs231n/classifiers/fc_net.py | 86b2053377b423a96411375ec289b26feb92ac93 | [
"MIT"
] | permissive | strategist922/cs231n | 6cc5c82d6df210828f1a3004c964941ae2775fd1 | 592aa76417341728759900b580141df333a5b46c | refs/heads/master | 2020-04-01T09:17:02.570479 | 2018-08-08T05:32:16 | 2018-08-08T05:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,156 | py | from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.layer_utils import *
class TwoLayerNet(object):
"""
A two-layer fully-connected neural network with ReLU nonlinearity and
softmax loss that uses a modular layer design. We assume an input dimension
of D, a hidden dimension of H, and perform classification over C classes.
The architecure should be affine - relu - affine - softmax.
Note that this class does not implement gradient descent; instead, it
will interact with a separate Solver object that is responsible for running
optimization.
The learnable parameters of the model are stored in the dictionary
self.params that maps parameter names to numpy arrays.
"""
def __init__(self, input_dim=3*32*32, hidden_dim=100, num_classes=10,
weight_scale=1e-3, reg=0.0):
"""
Initialize a new network.
Inputs:
- input_dim: An integer giving the size of the input
- hidden_dim: An integer giving the size of the hidden layer
- num_classes: An integer giving the number of classes to classify
- dropout: Scalar between 0 and 1 giving dropout strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- reg: Scalar giving L2 regularization strength.
"""
self.params = {}
self.reg = reg
############################################################################
# TODO: Initialize the weights and biases of the two-layer net. Weights #
# should be initialized from a Gaussian with standard deviation equal to #
# weight_scale, and biases should be initialized to zero. All weights and #
# biases should be stored in the dictionary self.params, with first layer #
# weights and biases using the keys 'W1' and 'b1' and second layer weights #
# and biases using the keys 'W2' and 'b2'. #
############################################################################
self.params['W1'] = np.random.randn(input_dim, hidden_dim)\
* weight_scale
self.params['b1'] = np.zeros(hidden_dim)
self.params['W2'] = np.random.randn(hidden_dim, num_classes)\
* weight_scale
self.params['b2'] = np.zeros(num_classes)
############################################################################
# END OF YOUR CODE #
############################################################################
def loss(self, X, y=None):
"""
Compute loss and gradient for a minibatch of data.
Inputs:
- X: Array of input data of shape (N, d_1, ..., d_k)
- y: Array of labels, of shape (N,). y[i] gives the label for X[i].
Returns:
If y is None, then run a test-time forward pass of the model and return:
- scores: Array of shape (N, C) giving classification scores, where
scores[i, c] is the classification score for X[i] and class c.
If y is not None, then run a training-time forward and backward pass and
return a tuple of:
- loss: Scalar value giving the loss
- grads: Dictionary with the same keys as self.params, mapping parameter
names to gradients of the loss with respect to those parameters.
"""
scores = None
W1, W2, b1, b2 = self.params['W1'], self.params['W2'], \
self.params['b1'], self.params['b2']
############################################################################
# TODO: Implement the forward pass for the two-layer net, computing the #
# class scores for X and storing them in the scores variable. #
############################################################################
hidden, hidden_cache = affine_relu_forward(X, W1, b1)
scores, out_cache = affine_forward(hidden, W2, b2)
############################################################################
# END OF YOUR CODE #
############################################################################
# If y is None then we are in test mode so just return scores
if y is None:
return scores
loss, grads = 0, {}
############################################################################
# TODO: Implement the backward pass for the two-layer net. Store the loss #
# in the loss variable and gradients in the grads dictionary. Compute data #
# loss using softmax, and make sure that grads[k] holds the gradients for #
# self.params[k]. Don't forget to add L2 regularization! #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
loss, dx3 = softmax_loss(scores, y)
loss += 0.5 * self.reg * (np.sum(W1 * W1) + np.sum(W2 * W2))
dx2, dw2, db2 = affine_backward(dx3, (hidden, W2, b2))
dx1, dw1, db1 = affine_relu_backward(dx2, hidden_cache)
grads['W1'] = dw1 + self.reg * W1
grads['W2'] = dw2 + self.reg * W2
grads['b1'] = db1
grads['b2'] = db2
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
class FullyConnectedNet(object):
"""
A fully-connected neural network with an arbitrary number of hidden layers,
ReLU nonlinearities, and a softmax loss function. This will also implement
dropout and batch normalization as options. For a network with L layers,
the architecture will be
{affine - [batch norm] - relu - [dropout]} x (L - 1) - affine - softmax
where batch normalization and dropout are optional, and the {...} block is
repeated L - 1 times.
Similar to the TwoLayerNet above, learnable parameters are stored in the
self.params dictionary and will be learned using the Solver class.
"""
def __init__(self, hidden_dims, input_dim=3*32*32, num_classes=10,
dropout=0, use_batchnorm=False, reg=0.0,
weight_scale=1e-2, dtype=np.float32, seed=None):
"""
Initialize a new FullyConnectedNet.
Inputs:
- hidden_dims: A list of integers giving the size of each hidden layer.
- input_dim: An integer giving the size of the input.
- num_classes: An integer giving the number of classes to classify.
- dropout: Scalar between 0 and 1 giving dropout strength. If dropout=0 then
the network should not use dropout at all.
- use_batchnorm: Whether or not the network should use batch normalization.
- reg: Scalar giving L2 regularization strength.
- weight_scale: Scalar giving the standard deviation for random
initialization of the weights.
- dtype: A numpy datatype object; all computations will be performed using
this datatype. float32 is faster but less accurate, so you should use
float64 for numeric gradient checking.
- seed: If not None, then pass this random seed to the dropout layers. This
will make the dropout layers deteriminstic so we can gradient check the
model.
"""
self.use_batchnorm = use_batchnorm
self.use_dropout = dropout > 0
self.reg = reg
self.num_layers = 1 + len(hidden_dims)
self.dtype = dtype
self.params = {}
############################################################################
# TODO: Initialize the parameters of the network, storing all values in #
# the self.params dictionary. Store weights and biases for the first layer #
# in W1 and b1; for the second layer use W2 and b2, etc. Weights should be #
# initialized from a normal distribution with standard deviation equal to #
# weight_scale and biases should be initialized to zero. #
# #
# When using batch normalization, store scale and shift parameters for the #
# first layer in gamma1 and beta1; for the second layer use gamma2 and #
# beta2, etc. Scale parameters should be initialized to one and shift #
# parameters should be initialized to zero. #
############################################################################
for i in range(self.num_layers):
if i == 0:
self.params['W1'] = np.random.randn(input_dim, hidden_dims[0])
self.params['W1'] *= weight_scale
self.params['b1'] = np.zeros(hidden_dims[0])
elif i == self.num_layers - 1:
self.params['W'+str(i+1)] = np.random.randn(hidden_dims[-1],
num_classes)
self.params['W'+str(i+1)] *= weight_scale
self.params['b'+str(i+1)] = np.zeros(num_classes)
else:
self.params['W'+str(i+1)] = np.random.randn(hidden_dims[i-1],
hidden_dims[i])
self.params['W'+str(i+1)] *= weight_scale
self.params['b'+str(i+1)] = np.zeros(hidden_dims[i])
############################################################################
# END OF YOUR CODE #
############################################################################
# When using dropout we need to pass a dropout_param dictionary to each
# dropout layer so that the layer knows the dropout probability and the mode
# (train / test). You can pass the same dropout_param to each dropout layer.
self.dropout_param = {}
if self.use_dropout:
self.dropout_param = {'mode': 'train', 'p': dropout}
if seed is not None:
self.dropout_param['seed'] = seed
# With batch normalization we need to keep track of running means and
# variances, so we need to pass a special bn_param object to each batch
# normalization layer. You should pass self.bn_params[0] to the forward pass
# of the first batch normalization layer, self.bn_params[1] to the forward
# pass of the second batch normalization layer, etc.
self.bn_params = []
if self.use_batchnorm:
self.bn_params = [{'mode': 'train'} for i in range(self.num_layers - 1)]
# Cast all parameters to the correct datatype
for k, v in self.params.items():
self.params[k] = v.astype(dtype)
def loss(self, X, y=None):
"""
Compute loss and gradient for the fully-connected net.
Input / output: Same as TwoLayerNet above.
"""
X = X.astype(self.dtype)
mode = 'test' if y is None else 'train'
# Set train/test mode for batchnorm params and dropout param since they
# behave differently during training and testing.
if self.dropout_param is not None:
self.dropout_param['mode'] = mode
if self.use_batchnorm:
for bn_param in self.bn_params:
bn_param['mode'] = mode
scores = None
############################################################################
# TODO: Implement the forward pass for the fully-connected net, computing #
# the class scores for X and storing them in the scores variable. #
# #
# When using dropout, you'll need to pass self.dropout_param to each #
# dropout forward pass. #
# #
# When using batch normalization, you'll need to pass self.bn_params[0] to #
# the forward pass for the first batch normalization layer, pass #
# self.bn_params[1] to the forward pass for the second batch normalization #
# layer, etc. #
############################################################################
out = []
cache = []
temp_out = X
for i in range(self.num_layers-1):
temp_out, affine_cache = affine_relu_forward(
temp_out,
self.params['W'+str(i+1)],
self.params['b'+str(i+1)])
out.append(temp_out)
cache.append(affine_cache)
temp_out, affine_cache = affine_forward(
temp_out,
self.params['W'+str(self.num_layers)],
self.params['b'+str(self.num_layers)]
)
out.append(temp_out)
cache.append(affine_cache)
scores = out[-1]
############################################################################
# END OF YOUR CODE #
############################################################################
# If test mode return early
if mode == 'test':
return scores
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the backward pass for the fully-connected net. Store the #
# loss in the loss variable and gradients in the grads dictionary. Compute #
# data loss using softmax, and make sure that grads[k] holds the gradients #
# for self.params[k]. Don't forget to add L2 regularization! #
# #
# When using batch normalization, you don't need to regularize the scale #
# and shift parameters. #
# #
# NOTE: To ensure that your implementation matches ours and you pass the #
# automated tests, make sure that your L2 regularization includes a factor #
# of 0.5 to simplify the expression for the gradient. #
############################################################################
loss, d_loss = softmax_loss(scores, y)
weight_norm = 0
for i in range(self.num_layers):
weight_norm += np.sum(self.params['W'+str(i+1)] ** 2)
loss += 0.5 * self.reg * weight_norm
#====================backpropagation
for i in np.arange(self.num_layers, 0, -1):
if i == self.num_layers:
d_affine = affine_backward(d_loss, cache[i-1])
else:
d_affine = affine_relu_backward(dout, cache[i-1])
dout = d_affine[0]
idx = str(i)
grads['W'+idx] = d_affine[1]
grads['b'+idx] = d_affine[2]
grads['W'+idx] += self.reg * self.params['W'+idx]
###########################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
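

# --- Editor's sketch (addition, not part of the original assignment file) ---
# Tiny smoke test for TwoLayerNet, assuming the cs231n package (layers.py,
# layer_utils.py) is importable; the printed numbers are illustrative only.
if __name__ == '__main__':
    np.random.seed(0)
    model = TwoLayerNet(input_dim=4, hidden_dim=3, num_classes=2, reg=0.1)
    X = np.random.randn(5, 4)
    y = np.random.randint(2, size=5)
    loss, grads = model.loss(X, y)
    print('loss:', loss)
    print('grad keys:', sorted(grads.keys()))  # ['W1', 'W2', 'b1', 'b2']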
| [
"[email protected]"
] | |
dfce2e36e00f6072dad0dd1363e621c2a3ac5e08 | 2635c2e2c31a7badb8b188306c3cdfc61dc1ecc8 | /versiones_anteriores_historico/ihm_ant_historico_precios/models/purchase_order_lines.py | 58305e8b480fabd0953f97d4aaea24313293aa7c | [] | no_license | rosalesdc/ihm_testing | ec4ebf26c3c7602267a04fd183de4064f9d16bc1 | d91ebeac5504c9f29a21b2b0f05bc16ed240ff48 | refs/heads/master | 2020-04-17T01:21:03.214595 | 2019-10-29T23:05:52 | 2019-10-29T23:05:52 | 166,088,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 105 | py | class purchaseOrderLines(models.Model):
_inherit = "purchase.order.lines"
def genera_reporte | [
"[email protected]"
] | |
464aca6bdb71bea101e69f9172bcaeea0fdf5dee | a485f01fd697721356d4405dfef569c50499d652 | /SipMask-mmdetection/configs/sipmask/sipmask++_r101_caffe_fpn_ssd_6x.py | dabccfd91bdc17e15ea79c812b84436a3eeb3192 | [
"MIT",
"Apache-2.0"
] | permissive | Borda/SipMask | 653333c1c7a7b5e9d0779c28f1b86d17b623aa5f | bc63fa93f9291d7b664c065f41d937a65d3c72fd | refs/heads/master | 2023-05-25T11:11:44.452534 | 2021-03-26T02:47:49 | 2021-03-26T02:47:49 | 299,910,001 | 1 | 0 | MIT | 2020-09-30T12:22:24 | 2020-09-30T12:22:23 | null | UTF-8 | Python | false | false | 4,553 | py | # model settings
model = dict(
type='SipMask',
pretrained='open-mmlab://resnet101_caffe',
backbone=dict(
type='ResNet',
depth=101,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
style='caffe',
dcn=dict(type='DCN', deformable_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, True, True, True)),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs=True,
extra_convs_on_inputs=False, # use P5
num_outs=5,
relu_before_extra_convs=True),
bbox_head=dict(
type='SipMaskHead',
num_classes=81,
in_channels=256,
stacked_convs=2,
ssd_flag=True,
norm_cfg=None,
rescoring_flag = True,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='IoULoss', loss_weight=1.0),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
center_sampling=True,
center_sample_radius=1.5))
# training and testing settings
train_cfg = dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False)
test_cfg = dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.1,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(576, 576), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(544, 544),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=8,
workers_per_gpu=3,
train=dict(
type='RepeatDataset',
times=3,
dataset=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline)),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict()
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[20, 23])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/sipmask++_r101_caffe_fpn_6x'
load_from = None
resume_from = None
workflow = [('train', 1)] | [
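
# --- Editor's note (addition, not part of the original config) ---
# In mmdetection-style repos a config like this is typically launched with
#     python tools/train.py configs/sipmask/sipmask++_r101_caffe_fpn_ssd_6x.py
# (the config path is assumed from this record's metadata).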
"[email protected]"
] | |
8d09c7578019ad5f22cf3b2ab7e4e74eaa4c0bbe | f3ccd2cf5c1819cf6b2b296a134a59a58deb87a6 | /03img_classify/classify.py | a1bbe880c582e1d737c4fb08b515be56ded6347b | [] | no_license | leebinjun/gaze_tracking_ARglasses | 195120a17a0e4858f4cdfc9516e781567f091fb0 | 63841b565f6fbb16f788268fb1ef991df0142b6b | refs/heads/master | 2020-07-27T01:50:35.800129 | 2019-12-18T03:05:15 | 2019-12-18T03:05:15 | 208,825,449 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | import tensorflow as tf
import numpy as np
import cv2
uid_to_human = {}
for line in tf.gfile.GFile('imagenet_synset_to_human_label_map.txt').readlines():
items = line.strip().split('\t')
uid_to_human[items[0]] = items[1]
node_id_to_uid = {}
for line in tf.gfile.GFile('imagenet_2012_challenge_label_map_proto.pbtxt').readlines():
if line.startswith(' target_class:'):
target_class = int(line.split(': ')[1])
if line.startswith(' target_class_string:'):
target_class_string = line.split(': ')[1].strip('\n').strip('\"')
node_id_to_uid[target_class] = target_class_string
node_id_to_name = {}
for key, value in node_id_to_uid.items():
node_id_to_name[key] = uid_to_human[value]
def create_graph():
with tf.gfile.FastGFile('classify_image_graph_def.pb', 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='')
def classify_image(image, top_k=2):
image_data = tf.gfile.FastGFile(image, 'rb').read()
# print(image_data)
# image_data = cv2.imread(image)
create_graph()
with tf.Session() as sess:
# 'softmax:0': A tensor containing the normalized prediction across 1000 labels
# 'pool_3:0': A tensor containing the next-to-last layer containing 2048 float description of the image
# 'DecodeJpeg/contents:0': A tensor containing a string providing JPEG encoding of the image
softmax_tensor = sess.graph.get_tensor_by_name('softmax:0')
predictions = sess.run(softmax_tensor, feed_dict={'DecodeJpeg/contents:0': image_data})
predictions = np.squeeze(predictions)
top_k = predictions.argsort()[-top_k:]
for node_id in top_k:
human_string = node_id_to_name[node_id]
score = predictions[node_id]
print('%s (score = %.5f)' % (human_string, score))
classify_image('IMG_20190917_120404.jpg') | [
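
# --- Editor's sketch (addition, not part of the original script) ---
# The same graph can double as a feature extractor: 'pool_3:0' (named in
# the comments above) holds a 2048-d descriptor of the image. Assumes
# create_graph() has already been run, as classify_image() does above.
def extract_features(image):
    image_data = tf.gfile.FastGFile(image, 'rb').read()
    with tf.Session() as sess:
        pool3 = sess.graph.get_tensor_by_name('pool_3:0')
        features = sess.run(pool3, feed_dict={'DecodeJpeg/contents:0': image_data})
    return np.squeeze(features)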
"[email protected]"
] | |
4cbbc8616a600058c184f711b2e71766118ee132 | 2c8c7617d98f0349e560291960ecc5fb831bc0af | /programmers/level1/min/최대공약수와최소공배수.py | 67ebc70fa0b925142de49537b9ce963b435f15d2 | [] | no_license | BU-PS/coding_test | e53d9680ae80f32bfb5238795e868d3b37e5dd71 | c4fbd5034c8f507a858ca021cc7f6cfcf43f402a | refs/heads/master | 2023-03-15T05:26:36.566092 | 2021-03-25T12:36:40 | 2021-03-25T12:36:40 | 316,865,325 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 937 | py | # 최대 공약수 (GCD) : 두 개 이상의 자연수의 공통인 약수 중 가장 큰 수
# (GCD: the largest number that divides each of two or more natural numbers.)
# 1. How to find the GCD
#    - find the divisors of each of the two numbers
#    - put each number's divisors into a set
#    - intersect the sets to find the common divisors
#    - take the largest of the common divisors
# Least common multiple (LCM): the smallest number that is a common multiple of both
# 1. How to find the LCM
#    - solve the identity N * M = GCD * LCM for the LCM
def solution(n: int, m: int):
gcd_value = gcd(n=n, m=m)
lcm_value = lcm(n=n, m=m, g=gcd_value)
return [gcd_value, lcm_value]
def gcd(n: int, m: int):
max_value = max([n, m])
n_cm = set()
m_cm = set()
for i in range(1, max_value + 1):
if n % i == 0:
n_cm.add(i)
if m % i == 0:
m_cm.add(i)
return max(n_cm & m_cm)
def lcm(n: int, m: int, g: int):
return n * m // g
solution(4512, 18)
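# For comparison, a minimal sketch (not part of the original solution): Euclid's
# algorithm yields the same GCD without enumerating every divisor.
def gcd_euclid(n: int, m: int):
    while m:
        n, m = m, n % m
    return n
assert gcd_euclid(4512, 18) == gcd(4512, 18)  # both give 6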
| [
"[email protected]"
] | |
27e83ea7ad44899703c2d61c2941e9dcef77cdd2 | 8e583ac7e8a2047f01fa6e9829f9de36022c3265 | /lib/python/gooey/python_bindings/gooey_parser.py | 95e2b888765693f41f424ea3c8819bc2d20689f2 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jonathanlurie/timelapseComposer | ef25c5623d19024e5f83ad6c236497fdcffca10d | 8de9f1ca626419bacb11bf6c563e79d52fb16a8d | refs/heads/master | 2021-01-10T09:18:58.517707 | 2015-05-25T18:58:40 | 2015-05-25T18:58:40 | 36,248,988 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,176 | py |
from argparse import ArgumentParser
class GooeyParser(object):
def __init__(self, **kwargs):
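# assign through __dict__ to bypass the __setattr__ override below, which would
# otherwise try to forward the assignment to the not-yet-existing wrapped parser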
self.__dict__['parser'] = ArgumentParser(**kwargs)
self.widgets = {}
@property
def _mutually_exclusive_groups(self):
return self.parser._mutually_exclusive_groups
@property
def _actions(self):
return self.parser._actions
@property
def description(self):
return self.parser.description
def add_argument(self, *args, **kwargs):
widget = kwargs.pop('widget', None)
self.parser.add_argument(*args, **kwargs)
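# remember which Gooey widget was requested for the argument just added;
# the new argparse action is the last entry in parser._actions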
self.widgets[self.parser._actions[-1].dest] = widget
def add_mutually_exclusive_group(self, **kwargs):
return self.parser.add_mutually_exclusive_group(**kwargs)
def add_argument_group(self, *args, **kwargs):
return self.parser.add_argument_group(*args, **kwargs)
def parse_args(self, args=None, namespace=None):
return self.parser.parse_args(args, namespace)
def __getattr__(self, item):
return getattr(self.parser, item)
def __setattr__(self, key, value):
return setattr(self.parser, key, value)
| [
"[email protected]"
] | |
3bec992595116b04adc9b11ce51ab2e1693e2a4b | 38c8cca903432a88a6141dab4b9ac24740ae9e39 | /src/crike_django/manage.py | 8937161661f10d2505a8b82f54ecd20f227091b2 | [
"Apache-2.0"
] | permissive | mxwww/crike | 3d37882e75a4f7170d183d2050d6a643a72f381b | 141bd1c9b37882f0369dd8231cdf3576eeb7a5e1 | refs/heads/master | 2023-07-19T08:09:25.806263 | 2016-06-11T05:30:46 | 2016-06-11T05:30:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crike_django.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
a2190e26fa997ddb6b13f10b274f5d200b6e3918 | de6fb3a55196b6bd36a4fda0e08ad658679fb7a1 | /vt_manager/src/python/vt_manager/models/utils/Choices.py | 8b045af44501156cda604d3c0cf5c53e35f5078b | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | dana-i2cat/felix | 4a87af639e4c7db686bfa03f1ae4ce62711615e3 | 059ed2b3308bda2af5e1942dc9967e6573dd6a53 | refs/heads/master | 2021-01-02T23:12:43.840754 | 2016-02-04T10:04:24 | 2016-02-04T10:04:24 | 17,132,912 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,606 | py |
class VirtTechClass:
VIRT_TECH_TYPE_XEN = "xen"
VIRT_TECH_CHOICES = (
(VIRT_TECH_TYPE_XEN, 'XEN'),
)
@staticmethod
def validateVirtTech(value):
for choice in VirtTechClass.VIRT_TECH_CHOICES:
if value in choice:
return
raise Exception("Virtualization Type not valid")
class OSDistClass():
OS_DIST_TYPE_DEBIAN = "Debian"
OS_DIST_TYPE_UBUNTU = "Ubuntu"
OS_DIST_TYPE_REDHAT = "RedHat"
OS_DIST_TYPE_CENTOS = "CentOS"
OS_DIST_CHOICES = (
(OS_DIST_TYPE_DEBIAN, 'Debian'),
(OS_DIST_TYPE_UBUNTU, 'Ubuntu'),
(OS_DIST_TYPE_REDHAT, 'RedHat'),
(OS_DIST_TYPE_CENTOS, 'CentOS'),
)
@staticmethod
def validateOSDist(value):
for choice in OSDistClass.OS_DIST_CHOICES:
if value in choice:
return
raise Exception("OS Distribution not valid")
class OSVersionClass():
OS_VERSION_TYPE_50 = "5.0"
OS_VERSION_TYPE_60 = "6.0"
OS_VERSION_TYPE_62 = "6.2"
OS_VERSION_TYPE_70 = "7.0"
OS_VERSION_CHOICES = (
(OS_VERSION_TYPE_50, '5.0'),
(OS_VERSION_TYPE_60, '6.0'),
(OS_VERSION_TYPE_62, '6.2'),
(OS_VERSION_TYPE_70, '7.0'),
)
@staticmethod
def validateOSVersion(value):
for choice in OSVersionClass.OS_VERSION_CHOICES:
if value in choice:
return
raise Exception("OS Version not valid")
class OSTypeClass():
OS_TYPE_TYPE_GNULINUX = "GNU/Linux"
OS_TYPE_TYPE_WINDOWS = "Windows"
OS_TYPE_CHOICES = (
(OS_TYPE_TYPE_GNULINUX, 'GNU/Linux'),
(OS_TYPE_TYPE_WINDOWS, 'Windows'),
)
@staticmethod
def validateOSType(value):
for choice in OSTypeClass.OS_TYPE_CHOICES:
if value in choice:
return
raise Exception("OS Type not valid")
| [
"[email protected]"
] | |
06e6ec8eeae855acf71b78ba670c6f33f3e7d563 | b144c5142226de4e6254e0044a1ca0fcd4c8bbc6 | /ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/cfm_c1742b75736db9d1da0fb731317ab337.py | 2553211aa330936731e6ce2d5a16da9385c4e481 | [
"MIT"
] | permissive | iwanb/ixnetwork_restpy | fa8b885ea7a4179048ef2636c37ef7d3f6692e31 | c2cb68fee9f2cc2f86660760e9e07bd06c0013c2 | refs/heads/master | 2021-01-02T17:27:37.096268 | 2020-02-11T09:28:15 | 2020-02-11T09:28:15 | 239,721,780 | 0 | 0 | NOASSERTION | 2020-02-11T09:20:22 | 2020-02-11T09:20:21 | null | UTF-8 | Python | false | false | 6,156 | py | # MIT LICENSE
#
# Copyright 1997 - 2019 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class Cfm(Base):
"""This object contains the configuration of the CFM protocol.
The Cfm class encapsulates a required cfm resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'cfm'
def __init__(self, parent):
super(Cfm, self).__init__(parent)
@property
def Bridge(self):
"""An instance of the Bridge class.
Returns:
obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bridge_d8b0c3589e6175e046e1a83cbe6f36b6.Bridge)
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.bridge_d8b0c3589e6175e046e1a83cbe6f36b6 import Bridge
return Bridge(self)
@property
def EnableOptionalLmFunctionality(self):
"""NOT DEFINED
Returns:
bool
"""
return self._get_attribute('enableOptionalLmFunctionality')
@EnableOptionalLmFunctionality.setter
def EnableOptionalLmFunctionality(self, value):
self._set_attribute('enableOptionalLmFunctionality', value)
@property
def EnableOptionalTlvValidation(self):
"""If true, the CFM protocol will validate optional TLVs present in CFM packets.
Returns:
bool
"""
return self._get_attribute('enableOptionalTlvValidation')
@EnableOptionalTlvValidation.setter
def EnableOptionalTlvValidation(self, value):
self._set_attribute('enableOptionalTlvValidation', value)
@property
def Enabled(self):
"""If true, the CFM protcol is enabled.
Returns:
bool
"""
return self._get_attribute('enabled')
@Enabled.setter
def Enabled(self, value):
self._set_attribute('enabled', value)
@property
def ReceiveCcm(self):
"""If true, the CFM protocol can receive CFM CCMs on this port.
Returns:
bool
"""
return self._get_attribute('receiveCcm')
@ReceiveCcm.setter
def ReceiveCcm(self, value):
self._set_attribute('receiveCcm', value)
@property
def RunningState(self):
"""The current running state of the CFM protocol.
Returns:
str(unknown|stopped|stopping|starting|started)
"""
return self._get_attribute('runningState')
@property
def SendCcm(self):
"""If true, the CFM protocol can send CFM CCMs from this port.
Returns:
bool
"""
return self._get_attribute('sendCcm')
@SendCcm.setter
def SendCcm(self, value):
self._set_attribute('sendCcm', value)
@property
def SuppressErrorsOnAis(self):
"""If true, the errors on AIS are suopressed.
Returns:
bool
"""
return self._get_attribute('suppressErrorsOnAis')
@SuppressErrorsOnAis.setter
def SuppressErrorsOnAis(self, value):
self._set_attribute('suppressErrorsOnAis', value)
def update(self, EnableOptionalLmFunctionality=None, EnableOptionalTlvValidation=None, Enabled=None, ReceiveCcm=None, SendCcm=None, SuppressErrorsOnAis=None):
"""Updates a child instance of cfm on the server.
Args:
EnableOptionalLmFunctionality (bool): NOT DEFINED
EnableOptionalTlvValidation (bool): If true, the CFM protocol will validate optional TLVs present in CFM packets.
Enabled (bool): If true, the CFM protocol is enabled.
ReceiveCcm (bool): If true, the CFM protocol can receive CFM CCMs on this port.
SendCcm (bool): If true, the CFM protocol can send CFM CCMs from this port.
SuppressErrorsOnAis (bool): If true, the errors on AIS are suppressed.
Raises:
ServerError: The server has encountered an uncategorized error condition
"""
self._update(locals())
def Start(self):
"""Executes the start operation on the server.
Starts the CFM protocol on a port or group of ports.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('start', payload=payload, response_object=None)
def Stop(self):
"""Executes the stop operation on the server.
Stops the CFM protocol on a port or group of ports.
Raises:
NotFoundError: The requested resource does not exist on the server
ServerError: The server has encountered an uncategorized error condition
"""
payload = { "Arg1": self.href }
return self._execute('stop', payload=payload, response_object=None)
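# Usage sketch (illustrative only; the access path below is an assumption, not
# defined in this module):
#   cfm = ixnetwork.Vport.find()[0].Protocols.find()[0].Cfm
#   cfm.update(Enabled=True, SendCcm=True, ReceiveCcm=True)
#   cfm.Start()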
| [
"[email protected]"
] | |
3093186e46c96765d0b51554468a7761c5484e8e | a72e79b8caa43e973e7d7ecb7ffdaba15314bb9f | /server/wtpodcast2/feeds/whatsnew/urls.py | d16a486de32ebec8b741e4bcd3163578dee8596f | [] | no_license | crgwbr/wt-podcast2 | 2e4be9a0ffa8675d8283f3d0cc16adc799acac68 | a2dfb178b5e4c3e9ac5ab9ef7c13669caf50129c | refs/heads/master | 2022-12-24T07:42:47.599582 | 2020-10-08T15:35:52 | 2020-10-08T15:35:52 | 266,866,346 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 146 | py | from django.urls import path
from . import views
app_name = 'whatsnew'
urlpatterns = [
path('feed.rss', views.feed_rss, name='feed_rss'),
]
| [
"[email protected]"
] | |
f7a4a5e6467ee184d150cffb9fae09e625703666 | 1af44bdcbc3c15d3f6e436a7924dfd45f504ab3a | /01.jump to python/02.Data Science/1. collection/6. App/Scheduler_example.py | 25345e1d5cf5e270c0a09dd38d9fcbcdd11abc17 | [] | no_license | wql7654/bigdata_exam | f57c8b475690cbc5978009dbf8008bedff602e2a | c07ee711bb84407428ba31165185b9607b6825e8 | refs/heads/master | 2023-04-07T00:50:59.563714 | 2021-05-25T02:46:43 | 2021-05-25T02:46:43 | 180,915,985 | 0 | 0 | null | 2023-03-25T01:08:09 | 2019-04-12T02:36:08 | Jupyter Notebook | UTF-8 | Python | false | false | 998 | py | import threading
import time
g_Balcony_windows=False
g_AI_Mode=False
def update_scheduler():
global g_Balcony_windows
while True:
if g_AI_Mode == False:
time.sleep(0.1)  # yield the CPU instead of busy-waiting while AI mode is off
continue
else:
time.sleep(5)
g_Balcony_windows=not g_Balcony_windows
t = threading.Thread(target=update_scheduler)
t.daemon=True
t.start()
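# daemon=True lets menu option 3 terminate the whole program even though the
# scheduler thread's loop never returns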
while True:
print("메뉴를 선택하세요")
print("1. 장비 상태 조회")
print("2. 인공지능 모드 변경")
print("3. 종료")
menu_num= int(input("메뉴 입력: "))
if(menu_num==1):
print("발코니(베란다) 창문: ",end='')
if g_Balcony_windows==True:
print("열림")
else:
print("닫힘")
elif(menu_num==2):
print("인공지능 모드: ", end='')
g_AI_Mode=not g_AI_Mode
if g_AI_Mode==True:
print("작동")
else:
print("정지")
else:
break | [
"[email protected]"
] | |
0864304fb6f9996499fcb687bf16c415b3d12c7e | 938d5d26c0346316a10a74520b7e30b1bb1f6893 | /oncopolicy/utils/generic.py | 4a1dcb70ea43341de423c68976e0cc57c3119a36 | [
"MIT"
] | permissive | yala/Tempo | 46fe0da5a6e2e1a8b9bc855851e7ff9a3ab63bd6 | bf3e0e78d64869bb2079c582a4a35982f78386ad | refs/heads/main | 2023-04-17T07:04:34.697607 | 2022-01-13T21:03:04 | 2022-01-13T21:03:04 | 419,388,269 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,560 | py | import datetime
import hashlib
import numpy as np
from copy import deepcopy
import torch
import pdb
INVALID_DATE_STR = "Date string not valid! Received {}, and got exception {}"
ISO_FORMAT = '%Y-%m-%d %H:%M:%S'
CGMH_ISO_FORMAT ='%Y%m%d'
DAYS_IN_YEAR = 365
DAYS_IN_MO = 30
MAX_MO_TO_CANCER = 1200
MIN_MO_TO_CANCER = 3
MAX_PREFERNCES = 10.0
MIN_PREFERNCES = 0
EPSILON = 1e-3
AVG_MOMENTUM = 0.95
NUM_DIM_AUX_FEATURES = 7 ## Deprecated
class AverageMeter():
def __init__(self):
self.avg = 0
self.first_update = True
def reset(self):
self.avg = 0
self.first_update = True
def update(self, val_tensor):
val = val_tensor.item()
if self.first_update:
self.avg = val
self.first_update = False
else:
self.avg = (AVG_MOMENTUM * self.avg) + (1-AVG_MOMENTUM) * val
assert self.avg >= 0 and val >= 0
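# Usage sketch (illustrative): smooth a loss curve with this exponential moving average.
#   meter = AverageMeter()
#   meter.update(torch.tensor(0.42))  # first update: avg == 0.42
#   meter.update(torch.tensor(0.40))  # afterwards: avg = 0.95 * avg + 0.05 * val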
def get_aux_tensor(tensor, args):
## use of auxillary features for screen is deprecated
return torch.zeros([tensor.size()[0], NUM_DIM_AUX_FEATURES]).to(tensor.device)
def to_numpy(tensor):
return tensor.cpu().numpy()
def to_tensor(arr, device):
return torch.Tensor(arr).to(device)
def sample_preference_vector(batch_size, sample_random, args):
if sample_random:
dist = torch.distributions.uniform.Uniform(MIN_PREFERNCES, MAX_PREFERNCES)
preferences = dist.sample([batch_size, len(args.metrics), 1])
else:
preferences = torch.ones(batch_size, len(args.metrics), 1)
preferences *= torch.tensor(args.fixed_preference).unsqueeze(0).unsqueeze(-1)
preferences = preferences + EPSILON
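# the EPSILON shift above keeps every weight strictly positive; the division
# below renormalizes each preference vector to sum to 1 across the metrics dim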
preferences = (preferences / (preferences).sum(dim=1).unsqueeze(-1))
return preferences.to(args.device)
def normalize_dictionary(dictionary):
'''
Normalizes counts in dictionary
:dictionary: a python dict where each value is a count
:returns: a python dict where each value is normalized to sum to 1
'''
num_samples = sum([dictionary[l] for l in dictionary])
for label in dictionary:
dictionary[label] = dictionary[label]*1. / num_samples
return dictionary
def parse_date(iso_string):
'''
Takes a string of format "YYYY-MM-DD HH:MM:SS" and
returns the corresponding datetime.datetime object;
throws an exception if the string cannot be parsed.
'''
try:
return datetime.datetime.strptime(iso_string, ISO_FORMAT)
except Exception as e:
raise Exception(INVALID_DATE_STR.format(iso_string, e))
def md5(key):
'''
returns a hashed with md5 string of the key
'''
return hashlib.md5(key.encode()).hexdigest()
def pad_array_to_length(arr, pad_token, max_length):
arr = arr[:max_length]
return np.array( arr + [pad_token]* (max_length - len(arr)))
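# fast_forward_exam_by_one_time_step (below) advances an exam by one scheduling
# step: the absolute dates of last follow-up and of the cancer event stay fixed,
# so the relative intervals shrink, and has_cancer flips once months_to_cancer
# drops below MIN_MO_TO_CANCER.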
def fast_forward_exam_by_one_time_step(curr_exam, NUM_DAYS_IN_TIME_STEP):
exam = deepcopy(curr_exam)
est_date_of_last_followup = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_YEAR * curr_exam['years_to_last_followup']))
est_date_of_cancer = curr_exam['date'] + datetime.timedelta(days=int(DAYS_IN_MO * curr_exam['months_to_cancer']))
exam['date'] = curr_exam['date'] + datetime.timedelta(days=int(NUM_DAYS_IN_TIME_STEP))
exam['years_to_last_followup'] = (est_date_of_last_followup - exam['date']).days / DAYS_IN_YEAR
exam['months_to_cancer'] = (est_date_of_cancer - exam['date']).days / DAYS_IN_MO
exam['has_cancer'] = exam['months_to_cancer'] < MIN_MO_TO_CANCER
exam['time_stamp'] = curr_exam['time_stamp'] + 1
return exam
| [
"[email protected]"
] | |
9518f30afd0866dfa568b4f15f136dbab54fdeb8 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02705/s961392013.py | 735378c11733dcf7056fec7e375575cc6f489fe0 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 152 | py | # mathモジュールをインポート
import math
r = input().rstrip()
r = int(r)
# approximate value of pi
x = math.pi
ans = (2 * r) * x
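# i.e. the circumference of a circle of radius r: 2 * pi * r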
print(ans)
| [
"[email protected]"
] | |
79a50b733533cd6299691b654d5ce900ae38596f | de4449e4fbd2972a5a7e775e3a3c7a187ef86899 | /ubiops/models/pipeline_request_deploment_request.py | 4cab9039fd3b21f5a7110af758c322d56e311bb0 | [
"Apache-2.0"
] | permissive | egutierrez-ar/client-library-python | 03325cc1d4c3e949187889ceb404a08660a7f418 | 94177e5f175263bce645c15a171e54690b1e254f | refs/heads/master | 2023-01-22T23:41:40.274718 | 2020-11-19T07:34:36 | 2020-11-19T07:34:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,840 | py | # coding: utf-8
"""
UbiOps
Client Library to interact with the UbiOps API. # noqa: E501
The version of the OpenAPI document: v2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ubiops.configuration import Configuration
class PipelineRequestDeplomentRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'request_id': 'str',
'pipeline_object': 'str',
'success': 'bool',
'request_data': 'object',
'result': 'object',
'error_message': 'str'
}
attribute_map = {
'request_id': 'request_id',
'pipeline_object': 'pipeline_object',
'success': 'success',
'request_data': 'request_data',
'result': 'result',
'error_message': 'error_message'
}
def __init__(self, request_id=None, pipeline_object=None, success=None, request_data=None, result=None, error_message=None, local_vars_configuration=None): # noqa: E501
"""PipelineRequestDeplomentRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._request_id = None
self._pipeline_object = None
self._success = None
self._request_data = None
self._result = None
self._error_message = None
self.discriminator = None
self.request_id = request_id
self.pipeline_object = pipeline_object
self.success = success
self.request_data = request_data
self.result = result
self.error_message = error_message
@property
def request_id(self):
"""Gets the request_id of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The request_id of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: str
"""
return self._request_id
@request_id.setter
def request_id(self, request_id):
"""Sets the request_id of this PipelineRequestDeplomentRequest.
:param request_id: The request_id of this PipelineRequestDeplomentRequest. # noqa: E501
:type: str
"""
self._request_id = request_id
@property
def pipeline_object(self):
"""Gets the pipeline_object of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The pipeline_object of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: str
"""
return self._pipeline_object
@pipeline_object.setter
def pipeline_object(self, pipeline_object):
"""Sets the pipeline_object of this PipelineRequestDeplomentRequest.
:param pipeline_object: The pipeline_object of this PipelineRequestDeplomentRequest. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pipeline_object is None: # noqa: E501
raise ValueError("Invalid value for `pipeline_object`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
pipeline_object is not None and len(pipeline_object) < 1):
raise ValueError("Invalid value for `pipeline_object`, length must be greater than or equal to `1`") # noqa: E501
self._pipeline_object = pipeline_object
@property
def success(self):
"""Gets the success of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The success of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this PipelineRequestDeplomentRequest.
:param success: The success of this PipelineRequestDeplomentRequest. # noqa: E501
:type: bool
"""
if self.local_vars_configuration.client_side_validation and success is None: # noqa: E501
raise ValueError("Invalid value for `success`, must not be `None`") # noqa: E501
self._success = success
@property
def request_data(self):
"""Gets the request_data of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The request_data of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: object
"""
return self._request_data
@request_data.setter
def request_data(self, request_data):
"""Sets the request_data of this PipelineRequestDeplomentRequest.
:param request_data: The request_data of this PipelineRequestDeplomentRequest. # noqa: E501
:type: object
"""
self._request_data = request_data
@property
def result(self):
"""Gets the result of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The result of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: object
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this PipelineRequestDeplomentRequest.
:param result: The result of this PipelineRequestDeplomentRequest. # noqa: E501
:type: object
"""
self._result = result
@property
def error_message(self):
"""Gets the error_message of this PipelineRequestDeplomentRequest. # noqa: E501
:return: The error_message of this PipelineRequestDeplomentRequest. # noqa: E501
:rtype: str
"""
return self._error_message
@error_message.setter
def error_message(self, error_message):
"""Sets the error_message of this PipelineRequestDeplomentRequest.
:param error_message: The error_message of this PipelineRequestDeplomentRequest. # noqa: E501
:type: str
"""
self._error_message = error_message
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PipelineRequestDeplomentRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PipelineRequestDeplomentRequest):
return True
return self.to_dict() != other.to_dict()
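# Round-trip sketch (illustrative): equality is defined by comparing to_dict() output.
#   a = PipelineRequestDeplomentRequest(pipeline_object="obj-1", success=True)
#   b = PipelineRequestDeplomentRequest(pipeline_object="obj-1", success=True)
#   assert a == b and a.to_dict()["success"] is True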
| [
"[email protected]"
] | |
95f6713504bf13a0bf73502b797efe5295597a01 | bcb71f3ad0196709d462330a60801d5f8ec92ea6 | /backend/blog/models.py | 52046c2bc34be06eb996c26a98cfd002333212d7 | [
"BSD-3-Clause"
] | permissive | lautarianoo/lautacademy | c7a7e84958fd6209415c16a0957a7e12449a9afc | beec082bdffe8c773fcec51974a687aced278a76 | refs/heads/master | 2023-05-28T20:22:22.718962 | 2021-06-11T17:19:32 | 2021-06-11T17:19:32 | 364,965,320 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,201 | py | from ckeditor_uploader.fields import RichTextUploadingField
from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils import timezone
from ckeditor.fields import RichTextField
from mptt.models import MPTTModel, TreeForeignKey
from django.dispatch import receiver
from django.db.models.signals import post_save
from backend.utils.transliteration import transliteration_rus_eng
from backend.utils.send_mail import send_mail_user_post
class BlogCategory(MPTTModel):
"""Класс модели категорий сетей"""
name = models.CharField("Категория", max_length=50)
published = models.BooleanField("Опубликовать?", default=True)
parent = TreeForeignKey(
'self',
verbose_name="Родительская категория",
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='children')
slug = models.SlugField(max_length=100, blank=True, null=True, unique=True)
description = models.TextField("Description", max_length=300, default="")
class Meta:
verbose_name = "Категория"
verbose_name_plural = "Категории"
def __str__(self):
return self.name
class Tag(models.Model):
"""Класс модели тегов"""
name = models.CharField("Тег", max_length=50, unique=True, null=True)
slug = models.SlugField(max_length=100, blank=True, null=True)
class Meta:
verbose_name = "Тег"
verbose_name_plural = "Теги"
def __str__(self):
return self.name
class Post(models.Model):
"""Класс модели поста"""
author = models.ForeignKey(
User,
verbose_name="Автор",
on_delete=models.CASCADE)
title = models.CharField("Тема", max_length=500)
mini_text = models.TextField("Краткое содержание", max_length=5000)
text = models.TextField("Полное содержание", max_length=10000000)
created_date = models.DateTimeField("Дата создания", auto_now_add=True)
published_date = models.DateTimeField("Дата публикации", blank=True, null=True)
image = models.ImageField("Изображение", upload_to="blog/", blank=True)
tag = models.ManyToManyField(Tag, verbose_name="Тег", blank=True)
category = models.ForeignKey(
BlogCategory,
verbose_name="Категория",
blank=True,
null=True,
on_delete=models.SET_NULL)
published = models.BooleanField("Опубликовать?", default=True)
viewed = models.IntegerField("Просмотрено", default=0)
slug = models.SlugField(max_length=500, blank=True, null=True, unique=True)
description = models.TextField("Description", max_length=300, default="", null=True)
class Meta:
verbose_name = "Новость"
verbose_name_plural = "Новости"
ordering = ["-created_date"]
def publish(self):
self.published_date = timezone.now()
self.save()
def get_category_description(self):
return self.category.description
def get_absolute_url(self):
return reverse("single_post", kwargs={"category": self.category.slug, "slug": self.slug})
def save(self, *args, **kwargs):
super().save(*args, **kwargs)  # save first so the database assigns self.id on the first insert
new_slug = transliteration_rus_eng(self.title) + '-' + str(self.id)  # avoids a '-None' slug
if self.slug != new_slug:
self.slug = new_slug
super().save(update_fields=['slug'])
def __str__(self):
return self.title
class Comment(MPTTModel):
"""Модель коментариев к новостям"""
user = models.ForeignKey(User, verbose_name="Пользователь", on_delete=models.CASCADE)
post = models.ForeignKey(Post, verbose_name="Новость", on_delete=models.CASCADE)
text = models.TextField("Сообщение", max_length=2000)
date = models.DateTimeField("Дата", auto_now_add=True)
update = models.DateTimeField("Изменен", auto_now=True)
parent = TreeForeignKey(
"self",
verbose_name="Родительский комментарий",
on_delete=models.CASCADE,
null=True,
blank=True,
related_name='children')
published = models.BooleanField("Опубликовать?", default=True)
class Meta:
verbose_name = "Комментарий"
verbose_name_plural = "Комментарии"
def __str__(self):
return "{} - {}".format(self.user, self.post)
class SpySearch(models.Model):
"""Модель отслеживания запросов поиска"""
record = models.CharField("Запрос", max_length=1000)
counter = models.PositiveIntegerField("Количество запросов", default=0)
class Meta:
verbose_name = "Запрос"
verbose_name_plural = "Запросы"
def __str__(self):
return "{}".format(self.record)
@receiver(post_save, sender=Post)
def create_user_post(sender, instance, created, **kwargs):
"""Отправка сообщения о предложенной статье на email"""
if created:
send_mail_user_post(instance)
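# post_save fires with created=True exactly once per Post, right after the first
# insert, so the notification email goes out only for new posts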
| [
"neonchick1"
] | neonchick1 |
4e75a52bbb36cbac6858c29d1ab2d433f1f7071e | 169d809f45dedcaa3c7b1b49912d8b025abe18d9 | /challenge251_easy.py | 5d902f9cf7b2c9832252475e7fc7bf3834a08af4 | [] | no_license | bermec/challenges | 8a82d1d38d1ed1a0fc3f258443bc0054efc977a6 | 9fb092f20f12b4eaa808e758f00f482a49346c88 | refs/heads/master | 2021-10-08T05:05:56.803332 | 2018-12-08T00:20:20 | 2018-12-08T00:20:20 | 109,448,713 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,253 | py | '''
Nonograms, also known as Hanjie, Picross or Griddlers, are picture logic puzzles in
which cells in a grid must be colored or left blank according to numbers at the
side of the grid to reveal a hidden picture. In this puzzle type, the numbers are a
form of discrete tomography that measures how many unbroken lines of filled-in
squares there are in any given row or column.
In a Nonogram you are given the number of elements in the rows and columns. A row/column
containing no elements has a '0'; all other rows/columns will have at least one number.
Each number in a row/column represents a set of elements that sit next to each other.
If a row/column has multiple sets, the declaration of that row/column will have multiple
numbers. These sets will always be at least 1 cell apart.
An example
2 1 1
1 1 1 2 1
2 * *
1 2 * * *
0
2 1 * * *
2 * *
Input description
Today you will receive an image in ASCII with ' ' being empty and '*' being full.
The number of rows and columns will always be a multiple of 5.
    *
   **
  * *
 *  *
*****
Output description
Give the columns and rows for the input
Columns:
    1 1
1 2 1 1 5
Rows:
1
2
1 1
1 1
5
Ins
1
    *
   /|
  / |
 /  |
*---*
2
    /\ #
   /**\#
  /****\
 /******\
/--------\
| |
| || # |
| || # |
| || |
*------*
Bonus
Place the columns and rows in a grid like you would give to a puzzler
1 1
1 2 1 1 5
1
2
1 1
1 1
5
'''
import re

pattern = '''    *
   /|
  / |
 /  |
*---*'''

grid = pattern.splitlines()
width = max(len(row) for row in grid)
grid = [row.ljust(width) for row in grid]  # pad rows so every column has full height

def runs(line):
    # lengths of the maximal runs of filled (non-space) cells; ['0'] for an empty line
    found = [str(len(run)) for run in re.findall(r'\S+', line)]
    return found if found else ['0']

rows = [runs(row) for row in grid]
cols = [runs(''.join(row[x] for row in grid)) for x in range(width)]

# Print the column counts stacked vertically and bottom-aligned, as in the samples
# above (this simple layout assumes single-digit counts, like the samples do).
print('Columns:')
depth = max(len(col) for col in cols)
padded = [[' '] * (depth - len(col)) + col for col in cols]
for level in range(depth):
    print(' '.join(p[level] for p in padded))

print('Rows:')
for row in rows:
    print(' '.join(row))
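# For the triangle above this prints: Columns "    1 1" then "1 2 1 1 5",
# and Rows 1, 2, 1 1, 1 1, 5 (one row per line), matching the docstring example.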
| [
"[email protected]"
] | |
2982b152f2ef0916c17ae223e733483d0f455558 | fa9cc9cc469a3f0c5bdc0bc4e562dbbd3ff7e465 | /messages/RequestCloudMessage.py | cfd12e7df3126a5a7572dd9613f73bd6e7fc77a3 | [
"MIT"
] | permissive | zadjii/nebula | ddd86ea30791b46b2a1aeb000ae5dfea9a496168 | 50c4ec019c9f7eb15fe105a6c53a8a12880e281c | refs/heads/master | 2021-01-24T17:08:30.607634 | 2018-09-18T00:35:36 | 2018-09-18T00:35:36 | 36,847,552 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 829 | py | # last generated 2016-12-30 19:27:53.981000
from messages import BaseMessage
from msg_codes import REQUEST_CLOUD as REQUEST_CLOUD
__author__ = 'Mike'
class RequestCloudMessage(BaseMessage):
def __init__(self, id=None, cloud_uname=None, cname=None, username=None, passw=None):
super(RequestCloudMessage, self).__init__()
self.type = REQUEST_CLOUD
self.id = id
self.cloud_uname = cloud_uname
self.cname = cname
self.username = username
self.passw = passw
@staticmethod
def deserialize(json_dict):
msg = RequestCloudMessage()
msg.id = json_dict['id']
msg.cloud_uname = json_dict['cloud_uname']
msg.cname = json_dict['cname']
msg.username = json_dict['username']
msg.passw = json_dict['passw']
return msg
| [
"[email protected]"
] | |
de2366117fa19b7601ff66ec8e096b371bd033d7 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02379/s777602673.py | 78b06ae63e611e0c41787c5b51901c304d983aee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 120 | py | import math
x1,y1,x2,y2=map(float,input().split())
dx,dy=x2-x1,y2-y1
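# Euclidean distance between (x1, y1) and (x2, y2); math.hypot(dx, dy) would also work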
d=math.sqrt(dx*dx+dy*dy)
print("{:.8f}".format(d))
| [
"[email protected]"
] | |
68615c0335fc693397ae3712ddd889db8865a5ec | cee65c4806593554662330368c799c14ec943454 | /src/dms-preview/azext_dms/vendored_sdks/datamigration/models/project_task.py | f06065a5bf044dac5eb01d8d79ccba2c49e5d1d2 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | azclibot/azure-cli-extensions | d5d1a4ecdfc87fd79f5ad042fb85cdbf881897d2 | c230646258d4b56efb7d44eb7a0230f2943da6f6 | refs/heads/master | 2023-08-28T03:55:02.311902 | 2019-04-04T16:05:45 | 2019-04-04T16:05:45 | 179,548,695 | 1 | 1 | MIT | 2021-07-28T15:26:17 | 2019-04-04T17:54:39 | Python | UTF-8 | Python | false | false | 1,660 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
class ProjectTask(Resource):
"""A task resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:param etag: HTTP strong entity tag value. This is ignored if submitted.
:type etag: str
:param properties: Custom task properties
:type properties: ~azure.mgmt.datamigration.models.ProjectTaskProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ProjectTaskProperties'},
}
def __init__(self, **kwargs):
super(ProjectTask, self).__init__(**kwargs)
self.etag = kwargs.get('etag', None)
self.properties = kwargs.get('properties', None)
| [
"[email protected]"
] | |
e5c99083e888679c70abb0a62f231e39b7340f2b | 385a63d3c9e6f5815979165001f78ec3d7b90cd2 | /DrivingTDM_SetupMatlabOOP/headerAndFunctionsMotor/ximc/python-profiles/STANDA/8MT175V-150-VSS42.py | 36179aa2493ff6c8567ae576017af21d79a83175 | [
"BSD-2-Clause"
] | permissive | Rasedujjaman/matlabOOP | 5abb6ec94998fda5e9214ed94cf67a42bf243d4f | e1f025ab9b00a3646719df23852079736d2b5701 | refs/heads/main | 2023-07-23T21:40:53.905045 | 2021-08-31T16:12:39 | 2021-08-31T16:12:39 | 378,249,559 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,810 | py | def set_profile_8MT175V_150_VSS42(lib, id):
worst_result = Result.Ok
result = Result.Ok
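# worst_result keeps the first status that is neither Ok nor ValueError across the
# set_* calls below, so a hard failure in one settings block is not overwritten by
# later successes (a ValueError can still be upgraded to a more severe error)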
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 4000
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_NONE
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 4000
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
home_settings = home_settings_t()
home_settings.FastHome = 500
home_settings.uFastHome = 0
home_settings.SlowHome = 500
home_settings.uSlowHome = 0
home_settings.HomeDelta = 626
home_settings.uHomeDelta = 200
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_REV | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_DIR_SECOND
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 1000
move_settings.uSpeed = 0
move_settings.Accel = 2000
move_settings.Decel = 4000
move_settings.AntiplaySpeed = 1000
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 1
engine_settings.NomCurrent = 1200
engine_settings.NomSpeed = 4000
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON | EngineFlags_.ENGINE_REVERSE
engine_settings.Antiplay = 575
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
engine_settings.StepsPerRev = 200
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 50
power_settings.CurrReductDelay = 1000
power_settings.PowerOffDelay = 60
power_settings.CurrentSetTime = 300
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_OFF_ENABLED | PowerFlags_.POWER_REDUCT_ENABLED
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
edges_settings.BorderFlags = BorderFlags_.BORDER_STOP_RIGHT | BorderFlags_.BORDER_STOP_LEFT
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SW2_ACTIVE_LOW | EnderFlags_.ENDER_SW1_ACTIVE_LOW | EnderFlags_.ENDER_SWAP
edges_settings.LeftBorder = 874
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 57874
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 0
pid_settings.KiU = 0
pid_settings.KdU = 0
pid_settings.Kpf = 0.003599999938160181
pid_settings.Kif = 0.03799999877810478
pid_settings.Kdf = 2.8000000384054147e-05
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 100
control_settings.MaxSpeed[1] = 1000
control_settings.MaxSpeed[2] = 0
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0
emf_settings.R = 0
emf_settings.Km = 0
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([0, 116, 97, 110, 100, 97, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([56, 77, 84, 49, 55, 53, 86, 45, 49, 53, 48, 45, 86, 83, 83, 52, 50, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0.5
stage_settings.Units = bytes([0, 109, 0, 114, 101, 101, 0, 0])
stage_settings.MaxSpeed = 10
stage_settings.TravelRange = 150
stage_settings.SupplyVoltageMin = 0
stage_settings.SupplyVoltageMax = 0
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 111, 116, 105, 111, 110, 32, 67, 111, 110, 116, 114, 111, 108, 32, 80])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_STEP | MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 5000
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 1000
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 1
gear_settings.ReductionOut = 1
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_THERMOCOUPLE | TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| [
"[email protected]"
] | |
fdd007c0b032c25b1cf46ac0944db9b9217a204f | 82c2c272fe07da8afafb1dc4630cae1d48575f23 | /aws_reko/apps.py | 58e8ecd9ec40e1978d3eb6f2563e2de7a8beda8b | [] | no_license | final-project-fastflix/Fastflix_WPS | d830ea1bd3aae31edd8fcdcb70434d214ba77bd0 | 1e4296df2f6d41fed8308dcd4d48912bb8cc0e1f | refs/heads/develop | 2022-12-13T09:22:37.553487 | 2019-08-22T06:30:45 | 2019-08-22T06:30:45 | 199,455,228 | 3 | 2 | null | 2022-12-08T05:56:42 | 2019-07-29T13:09:55 | JavaScript | UTF-8 | Python | false | false | 90 | py | from django.apps import AppConfig
class AwsRekoConfig(AppConfig):
name = 'aws_reko'
| [
"[email protected]"
] | |
308397ed048cf03a190ffa0c99b55d07196a45cf | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_97/591.py | 65f897b1fd4fe4677b641f162d03e1a08dcae786 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | # Recycled Numbers
# main code
fr = open('C-large.in', 'r')
fw = open('C-large.out', 'w')
numOfTestCase = int(fr.readline())
for x in range(0,numOfTestCase):
result = ""
print("========== Test case " + str(x+1) + " ==========")
line = fr.readline()
line = line.split(" ")
A = int(line[0])
B = int(line[1])
# initialize number of distinct recycle number
nDistinct = 0
for i in range(A,B+1):
# change to string
i_str = str(i)
i_str_recycle = i_str
strlen = len(i_str)
if strlen == 1:
# No recycle number possible
continue
        pairList = array('i')
for j in range(0,strlen):
i_str_recycle = i_str_recycle[strlen-1] + i_str_recycle[0:strlen-1]
if i_str_recycle != i_str and i_str_recycle[0] != '0' and (A <= int(i_str_recycle) and int(i_str_recycle) <= B) and int(i_str_recycle) > i :
                # i_str_recycle must differ from i_str
                # i_str_recycle must not start with digit 0
                # i_str_recycle must lie within the range A to B inclusive
                # i_str_recycle must be greater than i
repeatFlag = 0
# finally, there should be no repeat pair
for k in range(0,len(pairList)):
if pairList[k] == int(i_str_recycle):
repeatFlag = 1
if repeatFlag == 0:
nDistinct = nDistinct + 1
# print(i_str + ", " + i_str_recycle)
                # record the current pair in pairList so it is not counted twice
pairList.append(int(i_str_recycle))
result = str(nDistinct)
fw.write("Case #" + str(x+1) + ": " + result + "\n")
fr.close()
fw.close()
| [
"[email protected]"
] | |
2136ceed7ded2995dc97b82ced276854c3146f10 | 6a044f45cd09695ea6f66f35bb8decf86a84607d | /installer/resources/pacbot_app/alb_https_listener.py | e285a11743e84dfb2706dd3e7435a718e88798c8 | [
"Apache-2.0"
] | permissive | ritesh74/pacbot | a07bdf82632342509f05b5c5dbb6eb6aaba40219 | 4b5361d99e7efbbc5603ec9c6568ba639105c773 | refs/heads/master | 2021-07-09T15:35:27.342903 | 2020-09-28T20:36:42 | 2020-09-28T20:36:42 | 199,405,428 | 1 | 0 | Apache-2.0 | 2019-07-29T07:53:26 | 2019-07-29T07:53:25 | null | UTF-8 | Python | false | false | 2,414 | py | from core.terraform.resources.aws.load_balancer import ALBListenerResource, ALBListenerRuleResource
from core.config import Settings
from resources.pacbot_app.alb import ApplicationLoadBalancer
from resources.pacbot_app import alb_target_groups as tg
PATH_PREFIX = '/api/'
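# Each service below gets a path-pattern listener rule forwarding PATH_PREFIX + "<service>*" to its target group.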
class PacBotHttpsListener(ALBListenerResource):
load_balancer_arn = ApplicationLoadBalancer.get_output_attr('arn')
port = 443
protocol = "HTTPS"
ssl_policy = "ELBSecurityPolicy-2016-08"
certificate_arn = Settings.get('SSL_CERTIFICATE_ARN')
default_action_target_group_arn = tg.NginxALBTargetGroup.get_output_attr('arn')
default_action_type = "forward"
class BaseLR:
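    """Shared attributes for the per-service HTTPS listener rules defined below."""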
listener_arn = PacBotHttpsListener.get_output_attr('arn')
action_type = "forward"
condition_field = "path-pattern"
class ConfigALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.ConfigALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "config*"]
class AdminALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.AdminALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "admin*"]
class ComplianceALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.ComplianceALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "compliance*"]
class NotificationsALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.NotificationsALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "notifications*"]
class StatisticsALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.StatisticsALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "statistics*"]
class AssetALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.AssetALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "asset*"]
class AuthALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.AuthALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "auth*"]
class VulnerabilityALBHttpsListenerRule(ALBListenerRuleResource, BaseLR):
action_target_group_arn = tg.VulnerabilityALBTargetGroup.get_output_attr('arn')
condition_values = [PATH_PREFIX + "vulnerability*"]
| [
"[email protected]"
] | |
d4c94fe92d17941badd8ceec535168ec2c320fe2 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/scrapinghub_portia/portia-master/portia_server/portia_orm/tests/test_relationship.py | 5167cbf61f361b3da5dede41e29b542d6a11d4f7 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 66,255 | py | import mock
from .models import (OneToOneModel1, OneToOneModel2, ParentModel, ChildModel,
ManyToManyModel1, ManyToManyModel2, PolymorphicParentModel,
PolymorphicChildModel1, PolymorphicChildModel2)
from .utils import DataStoreTestCase, mock_storage
class OneToOneRelationshipTests(DataStoreTestCase):
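    """Exercises the one-to-one descriptor from both sides, including storage round-trips."""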
def setUp(self):
super(OneToOneRelationshipTests, self).setUp()
self.storage = mock_storage({
'o2o-model-1.json':
'{'
' "id": "model-1",'
' "field": "model-1",'
' "m2": "model-2"'
'}',
'o2o-model-2.json':
'{'
' "id": "model-2",'
' "field": "model-2",'
' "m1": {'
' "id": "model-1",'
' "field": "model-1",'
' "m2": "model-2"'
' }'
'}',
})
def test_no_relation(self):
model1 = OneToOneModel1(id='model-1')
model2 = OneToOneModel2(id='model-2')
self.assertEqual(model1.m2, None)
self.assertEqual(model2.m1, None)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': None,
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': None,
})
def test_set_relation(self):
model1 = OneToOneModel1(id='model-1')
model2 = OneToOneModel2(id='model-2')
model2.m1 = model1
self.assertEqual(model1.m2, model2)
self.assertEqual(model2.m1, model1)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': 'model-2',
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': {
'id': 'model-1',
'm2': 'model-2',
},
})
def test_set_reverse_relation(self):
model1 = OneToOneModel1(id='model-1')
model2 = OneToOneModel2(id='model-2')
model1.m2 = model2
self.assertEqual(model1.m2, model2)
self.assertEqual(model2.m1, model1)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': 'model-2',
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': {
'id': 'model-1',
'm2': 'model-2',
},
})
def test_create_with_relation(self):
model1 = OneToOneModel1(id='model-1')
model2 = OneToOneModel2(id='model-2', m1=model1)
self.assertEqual(model1.m2, model2)
self.assertEqual(model2.m1, model1)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': 'model-2',
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': {
'id': 'model-1',
'm2': 'model-2',
},
})
def test_create_with_reverse_relation(self):
model2 = OneToOneModel2(id='model-2')
model1 = OneToOneModel1(id='model-1', m2=model2)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': 'model-2',
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': {
'id': 'model-1',
'm2': 'model-2',
},
})
def test_change_relation(self):
model1 = OneToOneModel1(id='model-1')
model2 = OneToOneModel2(id='model-2', m1=model1)
model3 = OneToOneModel1(id='model-3')
self.assertEqual(model1.m2, model2)
self.assertEqual(model2.m1, model1)
self.assertEqual(model3.m2, None)
model2.m1 = model3
self.assertEqual(model1.m2, None)
self.assertEqual(model2.m1, model3)
self.assertEqual(model3.m2, model2)
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': {
'id': 'model-3',
'm2': 'model-2',
},
})
def test_change_reverse_relation(self):
model1 = OneToOneModel1(id='model-1')
model2 = OneToOneModel2(id='model-2', m1=model1)
model3 = OneToOneModel1(id='model-3')
self.assertEqual(model1.m2, model2)
self.assertEqual(model2.m1, model1)
self.assertEqual(model3.m2, None)
model3.m2 = model2
self.assertEqual(model1.m2, None)
self.assertEqual(model2.m1, model3)
self.assertEqual(model3.m2, model2)
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': {
'id': 'model-3',
'm2': 'model-2',
},
})
def test_load_full(self):
model = OneToOneModel2(self.storage, id='model-2')
self.assertEqual(model.dump(), {
'id': 'model-2',
'field': 'model-2',
'm1': {
'id': 'model-1',
'field': 'model-1',
'm2': 'model-2',
},
})
self.storage.open.assert_called_once_with('o2o-model-2.json')
def test_load_partial(self):
model = OneToOneModel1(self.storage, id='model-1')
self.assertEqual(model.dump(), {
'id': 'model-1',
'field': 'model-1',
'm2': 'model-2',
})
self.assertEqual(self.storage.open.call_count, 2)
self.storage.open.assert_has_calls([
mock.call('o2o-model-1.json'),
mock.call('o2o-model-2.json')])
def test_save_field(self):
model1 = OneToOneModel1(self.storage, id='model-1')
model2 = model1.m2
model1.field = 'changed-field-1'
model2.field = 'changed-field-2'
model2.save()
self.assertEqual(self.storage.save.call_count, 1)
self.storage.save.assert_has_calls([
mock.call('o2o-model-2.json', mock.ANY)])
self.assertEqual(
self.storage.files['o2o-model-2.json'],
'{\n'
' "field": "changed-field-2", \n'
' "id": "model-2", \n'
' "m1": {\n'
' "field": "model-1", \n'
' "id": "model-1", \n'
' "m2": "model-2"\n'
' }\n'
'}')
model1.save()
self.assertEqual(self.storage.save.call_count, 3)
self.storage.save.assert_has_calls([
mock.call('o2o-model-2.json', mock.ANY),
mock.call('o2o-model-1.json', mock.ANY),
mock.call('o2o-model-2.json', mock.ANY)])
self.assertEqual(
self.storage.files['o2o-model-1.json'],
'{\n'
' "field": "changed-field-1", \n'
' "id": "model-1", \n'
' "m2": "model-2"\n'
'}')
self.assertEqual(
self.storage.files['o2o-model-2.json'],
'{\n'
' "field": "changed-field-2", \n'
' "id": "model-2", \n'
' "m1": {\n'
' "field": "changed-field-1", \n'
' "id": "model-1", \n'
' "m2": "model-2"\n'
' }\n'
'}')
def test_save_id(self):
model1 = OneToOneModel1(self.storage, id='model-1')
model2 = model1.m2
model1.id = 'changed-id-1'
model2.id = 'changed-id-2'
model2.save()
self.assertEqual(self.storage.save.call_count, 2)
self.storage.save.assert_has_calls([
mock.call('o2o-model-2.json', mock.ANY),
mock.call('o2o-model-1.json', mock.ANY)])
self.assertEqual(
self.storage.files['o2o-model-1.json'],
'{\n'
' "field": "model-1", \n'
' "id": "model-1", \n'
' "m2": "changed-id-2"\n'
'}')
self.assertEqual(
self.storage.files['o2o-model-2.json'],
'{\n'
' "field": "model-2", \n'
' "id": "changed-id-2", \n'
' "m1": {\n'
' "field": "model-1", \n'
' "id": "model-1", \n'
' "m2": "changed-id-2"\n'
' }\n'
'}')
model1.save()
self.assertEqual(self.storage.save.call_count, 4)
self.storage.save.assert_has_calls([
mock.call('o2o-model-2.json', mock.ANY),
mock.call('o2o-model-1.json', mock.ANY),
mock.call('o2o-model-1.json', mock.ANY),
mock.call('o2o-model-2.json', mock.ANY)])
self.assertEqual(
self.storage.files['o2o-model-1.json'],
'{\n'
' "field": "model-1", \n'
' "id": "changed-id-1", \n'
' "m2": "changed-id-2"\n'
'}')
self.assertEqual(
self.storage.files['o2o-model-2.json'],
'{\n'
' "field": "model-2", \n'
' "id": "changed-id-2", \n'
' "m1": {\n'
' "field": "model-1", \n'
' "id": "changed-id-1", \n'
' "m2": "changed-id-2"\n'
' }\n'
'}')
class OneToManyRelationshipTests(DataStoreTestCase):
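    """Exercises parent/children one-to-many behaviour and the list-like collection API."""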
def setUp(self):
super(OneToManyRelationshipTests, self).setUp()
self.storage = mock_storage({
'parents.json':
'{'
' "id": "parent-1",'
' "field": "parent-1",'
' "children": ['
' {'
' "id": "child-1",'
' "parent": "parent-1"'
' }'
' ]'
'}',
'parent-1/children.json':
'['
' {'
' "id": "child-1",'
' "field": "child-1",'
' "parent": "parent-1"'
' }'
']',
})
def test_no_children(self):
parent = ParentModel(id='parent-1')
self.assertEqual(len(parent.children), 0)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [],
})
def test_set_children(self):
parent = ParentModel(id='parent-1')
child = ChildModel(id='child-1')
parent.children = [child]
self.assertEqual(child.parent, parent)
self.assertEqual(len(parent.children), 1)
self.assertEqual(parent.children[0], child)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'id': 'child-1',
'parent': 'parent-1',
},
],
})
def test_add_to_children(self):
parent = ParentModel(id='parent-1')
child = ChildModel(id='child-1')
parent.children.add(child)
self.assertEqual(child.parent, parent)
self.assertEqual(len(parent.children), 1)
self.assertEqual(parent.children[0], child)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'id': 'child-1',
'parent': 'parent-1',
},
],
})
def test_set_parent(self):
parent = ParentModel(id='parent-1')
child = ChildModel(id='child-1')
child.parent = parent
self.assertEqual(child.parent, parent)
self.assertEqual(len(parent.children), 1)
self.assertEqual(parent.children[0], child)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'id': 'child-1',
'parent': 'parent-1',
},
],
})
def test_create_with_children(self):
child = ChildModel(id='child-1')
parent = ParentModel(id='parent-1', children=[child])
self.assertEqual(child.parent, parent)
self.assertEqual(len(parent.children), 1)
self.assertEqual(parent.children[0], child)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'id': 'child-1',
'parent': 'parent-1',
},
],
})
def test_create_with_parent(self):
parent = ParentModel(id='parent-1')
child = ChildModel(id='child-1', parent=parent)
self.assertEqual(child.parent, parent)
self.assertEqual(len(parent.children), 1)
self.assertEqual(parent.children[0], child)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'id': 'child-1',
'parent': 'parent-1',
},
],
})
def test_change_parent(self):
parent = ParentModel(id='parent-1')
parent2 = ParentModel(id='parent-2')
child = ChildModel(id='child-1', parent=parent)
self.assertEqual(child.parent, parent)
self.assertEqual(len(parent.children), 1)
self.assertEqual(len(parent2.children), 0)
child.parent = parent2
self.assertEqual(child.parent, parent2)
self.assertEqual(len(parent.children), 0)
self.assertEqual(len(parent2.children), 1)
self.assertEqual(parent2.children[0], child)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [],
})
self.assertEqual(parent2.dump(), {
'id': 'parent-2',
'children': [
{
'id': 'child-1',
'parent': 'parent-2',
},
],
})
def test_change_children(self):
parent = ParentModel(id='parent-1')
child = ChildModel(id='child-1', parent=parent)
child2 = ChildModel(id='child-2')
self.assertEqual(child.parent, parent)
self.assertEqual(child2.parent, None)
self.assertEqual(len(parent.children), 1)
parent.children = [child, child2]
self.assertEqual(child.parent, parent)
self.assertEqual(child2.parent, parent)
self.assertEqual(len(parent.children), 2)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'id': 'child-1',
'parent': 'parent-1',
},
{
'id': 'child-2',
'parent': 'parent-1',
},
],
})
def test_getitem(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2, child3])
self.assertIs(parent.children[0], child1)
self.assertIs(parent.children['child-1'], child1)
self.assertIs(parent.children[child1], child1)
with self.assertRaises(IndexError):
parent.children[1000]
with self.assertRaises(KeyError):
parent.children['child-4']
self.assertEqual(parent.children[1:], [child2, child3])
def test_get(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2, child3])
self.assertIs(parent.children.get(0), child1)
self.assertIs(parent.children.get('child-1'), child1)
self.assertIs(parent.children.get(child1), child1)
self.assertIs(parent.children.get('child-4'), None)
sentinel = object()
self.assertIs(parent.children.get('child-4', default=sentinel), sentinel)
def test_setitem(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2, child3])
child1b = ChildModel(id='child-1')
child1c = ChildModel(id='child-1')
child1d = ChildModel(id='child-1')
child4 = ChildModel(id='child-4')
child5 = ChildModel(id='child-5')
child6 = ChildModel(id='child-6')
child7 = ChildModel(id='child-7')
parent.children[0] = child1b
self.assertIs(parent.children[0], child1b)
parent.children['child-1'] = child1c
self.assertIs(parent.children[0], child1c)
parent.children[child1] = child1d
self.assertIs(parent.children[0], child1d)
self.assertListEqual(parent.children, [child1d, child2, child3])
parent.children[1:1] = [child4, child5]
self.assertIs(child4.parent, parent)
self.assertIs(child5.parent, parent)
self.assertListEqual(parent.children,
[child1d, child4, child5, child2, child3])
parent.children[:2] = [child6, child7]
self.assertIs(child6.parent, parent)
self.assertIs(child7.parent, parent)
self.assertIs(child1d.parent, None)
self.assertIs(child4.parent, None)
self.assertListEqual(parent.children,
[child6, child7, child5, child2, child3])
with self.assertRaises(ValueError):
parent.children[0:0] = [child2]
def test_delitem(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
child4 = ChildModel(id='child-4')
child5 = ChildModel(id='child-5')
parent = ParentModel(id='parent-1', children=[
child1, child2, child3, child4, child5])
del parent.children[0]
del parent.children['child-3']
del parent.children[child4]
self.assertListEqual(parent.children, [child2, child5])
self.assertIs(child1.parent, None)
self.assertIs(child3.parent, None)
self.assertIs(child4.parent, None)
def test_append(self):
child1 = ChildModel(id='child-1')
child1b = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2])
parent.children.append(child3)
self.assertListEqual(parent.children, [child1, child2, child3])
self.assertIs(child3.parent, parent)
with self.assertRaises(ValueError):
parent.children.append(child1b)
def test_add(self):
child1 = ChildModel(id='child-1')
child1b = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2])
parent.children.add(child3)
self.assertListEqual(parent.children, [child1, child2, child3])
self.assertIs(child3.parent, parent)
parent.children.add(child1b)
self.assertListEqual(parent.children, [child1, child2, child3])
def test_insert(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child2, child3])
parent.children.insert(0, child1)
self.assertListEqual(parent.children, [child1, child2, child3])
self.assertIs(child1.parent, parent)
def test_remove(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2, child3])
parent.children.remove(child1)
self.assertListEqual(parent.children, [child2, child3])
self.assertIs(child1.parent, None)
with self.assertRaises(ValueError):
parent.children.remove(child1)
def test_discard(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2, child3])
parent.children.discard(child1)
self.assertListEqual(parent.children, [child2, child3])
self.assertIs(child1.parent, None)
parent.children.discard(child1)
self.assertListEqual(parent.children, [child2, child3])
def test_pop(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2, child3])
pop1 = parent.children.pop()
self.assertIs(pop1, child3)
self.assertListEqual(parent.children, [child1, child2])
self.assertIs(child3.parent, None)
pop2 = parent.children.pop('child-1')
self.assertIs(pop2, child1)
self.assertListEqual(parent.children, [child2])
self.assertIs(child1.parent, None)
def test_clear(self):
child1 = ChildModel(id='child-1')
child2 = ChildModel(id='child-2')
child3 = ChildModel(id='child-3')
parent = ParentModel(id='parent-1', children=[child1, child2, child3])
parent.children.clear()
self.assertListEqual(parent.children, [])
self.assertIs(child1.parent, None)
self.assertIs(child2.parent, None)
self.assertIs(child3.parent, None)
def test_load_full(self):
model = ParentModel(self.storage, id='parent-1')
self.assertEqual(model.dump(), {
'id': 'parent-1',
'field': 'parent-1',
'children': [
{
'id': 'child-1',
'field': 'child-1',
'parent': 'parent-1',
},
],
})
self.assertEqual(self.storage.open.call_count, 2)
self.storage.open.assert_has_calls([
mock.call('parents.json'),
mock.call('parent-1/children.json')])
def test_load_partial(self):
model = ChildModel(self.storage, id='child-1',
parent=ParentModel(self.storage, id='parent-1'))
self.assertEqual(model.dump(), {
'id': 'child-1',
'field': 'child-1',
'parent': 'parent-1',
})
self.assertEqual(model, model.parent.children[0])
self.assertEqual(self.storage.open.call_count, 2)
self.storage.open.assert_has_calls([
mock.call('parents.json'),
mock.call('parent-1/children.json')])
self.assertEqual(model.parent.dump(), {
'id': 'parent-1',
'field': 'parent-1',
'children': [
{
'id': 'child-1',
'field': 'child-1',
'parent': 'parent-1',
},
],
})
def test_save_field(self):
parent = ParentModel(self.storage, id='parent-1')
child = parent.children[0]
child.field = 'changed-id-1'
parent.field = 'changed-id-2'
parent.save()
self.assertEqual(self.storage.save.call_count, 1)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY)])
self.assertEqual(
self.storage.files['parents.json'],
'{\n'
' "children": [\n'
' {\n'
' "field": "child-1", \n'
' "id": "child-1", \n'
' "parent": "parent-1"\n'
' }\n'
' ], \n'
' "field": "changed-id-2", \n'
' "id": "parent-1"\n'
'}')
child.save()
self.assertEqual(self.storage.save.call_count, 3)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY),
mock.call('parent-1/children.json', mock.ANY),
mock.call('parents.json', mock.ANY)])
self.assertEqual(
self.storage.files['parent-1/children.json'],
'[\n'
' {\n'
' "field": "changed-id-1", \n'
' "id": "child-1", \n'
' "parent": "parent-1"\n'
' }\n'
']')
self.assertEqual(
self.storage.files['parents.json'],
'{\n'
' "children": [\n'
' {\n'
' "field": "changed-id-1", \n'
' "id": "child-1", \n'
' "parent": "parent-1"\n'
' }\n'
' ], \n'
' "field": "changed-id-2", \n'
' "id": "parent-1"\n'
'}')
def test_save_id(self):
parent = ParentModel(self.storage, id='parent-1')
child = parent.children[0]
child.id = 'changed-id-1'
parent.id = 'changed-id-2'
parent.save()
self.assertEqual(self.storage.save.call_count, 2)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY),
mock.call('changed-id-2/children.json', mock.ANY)])
self.storage.delete.assert_called_once_with('parent-1/children.json')
self.assertEqual(
self.storage.files['changed-id-2/children.json'],
'[\n'
' {\n'
' "field": "child-1", \n'
' "id": "child-1", \n'
' "parent": "changed-id-2"\n'
' }\n'
']')
self.assertEqual(
self.storage.files['parents.json'],
'{\n'
' "children": [\n'
' {\n'
' "field": "child-1", \n'
' "id": "child-1", \n'
' "parent": "changed-id-2"\n'
' }\n'
' ], \n'
' "field": "parent-1", \n'
' "id": "changed-id-2"\n'
'}')
child.save()
self.assertEqual(self.storage.save.call_count, 4)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY),
mock.call('changed-id-2/children.json', mock.ANY),
mock.call('changed-id-2/children.json', mock.ANY),
mock.call('parents.json', mock.ANY)])
self.assertEqual(
self.storage.files['changed-id-2/children.json'],
'[\n'
' {\n'
' "field": "child-1", \n'
' "id": "changed-id-1", \n'
' "parent": "changed-id-2"\n'
' }\n'
']')
self.assertEqual(
self.storage.files['parents.json'],
'{\n'
' "children": [\n'
' {\n'
' "field": "child-1", \n'
' "id": "changed-id-1", \n'
' "parent": "changed-id-2"\n'
' }\n'
' ], \n'
' "field": "parent-1", \n'
' "id": "changed-id-2"\n'
'}')
class ManyToManyRelationshipTests(DataStoreTestCase):
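    """Exercises many-to-many collections on both models and their storage round-trips."""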
def setUp(self):
super(ManyToManyRelationshipTests, self).setUp()
self.storage = mock_storage({
'm2m-model-1.json':
'{'
' "id": "model-1",'
' "field": "model-1",'
' "m2": ['
' "model-2"'
' ]'
'}',
'm2m-model-2.json':
'['
' {'
' "id": "model-2",'
' "field": "model-2",'
' "m1": ['
' {'
' "id": "model-1",'
' "field": "model-1",'
' "m2": ['
' "model-2"'
' ]'
' }'
' ]'
' }'
']',
})
def test_no_relation(self):
model1 = ManyToManyModel1(id='model-1')
model2 = ManyToManyModel2(id='model-2')
self.assertEqual(len(model1.m2), 0)
self.assertEqual(len(model2.m1), 0)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [],
})
def test_set_relation(self):
model1 = ManyToManyModel1(id='model-1')
model2 = ManyToManyModel2(id='model-2')
model2.m1.append(model1)
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [
'model-2',
],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-1',
'm2': [
'model-2',
],
},
],
})
def test_set_reverse_relation(self):
model1 = ManyToManyModel1(id='model-1')
model2 = ManyToManyModel2(id='model-2')
model1.m2.append(model2)
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [
'model-2',
],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-1',
'm2': [
'model-2',
],
},
],
})
def test_create_with_relation(self):
model1 = ManyToManyModel1(id='model-1')
model2 = ManyToManyModel2(id='model-2', m1=[model1])
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [
'model-2',
],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-1',
'm2': [
'model-2',
],
},
],
})
def test_create_with_reverse_relation(self):
model2 = ManyToManyModel2(id='model-2')
model1 = ManyToManyModel1(id='model-1', m2=[model2])
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [
'model-2',
],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-1',
'm2': [
'model-2',
],
},
],
})
def test_change_relation(self):
model1 = ManyToManyModel1(id='model-1')
model2 = ManyToManyModel2(id='model-2', m1=[model1])
model3 = ManyToManyModel1(id='model-3')
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(len(model3.m2), 0)
model2.m1.append(model3)
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 2)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(model2.m1[1], model3)
self.assertEqual(len(model3.m2), 1)
self.assertEqual(model3.m2[0], model2)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [
'model-2',
],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-1',
'm2': [
'model-2',
],
},
{
'id': 'model-3',
'm2': [
'model-2',
],
},
],
})
self.assertEqual(model3.dump(), {
'id': 'model-3',
'm2': [
'model-2',
],
})
model2.m1.remove(model1)
self.assertEqual(len(model1.m2), 0)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model3)
self.assertEqual(len(model3.m2), 1)
self.assertEqual(model3.m2[0], model2)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-3',
'm2': [
'model-2',
],
},
],
})
self.assertEqual(model3.dump(), {
'id': 'model-3',
'm2': [
'model-2',
],
})
def test_change_reverse_relation(self):
model1 = ManyToManyModel1(id='model-1')
model2 = ManyToManyModel2(id='model-2', m1=[model1])
model3 = ManyToManyModel1(id='model-3')
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(len(model3.m2), 0)
model3.m2.append(model2)
self.assertEqual(len(model1.m2), 1)
self.assertEqual(model1.m2[0], model2)
self.assertEqual(len(model2.m1), 2)
self.assertEqual(model2.m1[0], model1)
self.assertEqual(model2.m1[1], model3)
self.assertEqual(len(model3.m2), 1)
self.assertEqual(model3.m2[0], model2)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [
'model-2',
],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-1',
'm2': [
'model-2',
],
},
{
'id': 'model-3',
'm2': [
'model-2',
],
},
],
})
self.assertEqual(model3.dump(), {
'id': 'model-3',
'm2': [
'model-2',
],
})
model1.m2.clear()
self.assertEqual(len(model1.m2), 0)
self.assertEqual(len(model2.m1), 1)
self.assertEqual(model2.m1[0], model3)
self.assertEqual(len(model3.m2), 1)
self.assertEqual(model3.m2[0], model2)
self.assertEqual(model1.dump(), {
'id': 'model-1',
'm2': [],
})
self.assertEqual(model2.dump(), {
'id': 'model-2',
'm1': [
{
'id': 'model-3',
'm2': [
'model-2',
],
},
],
})
self.assertEqual(model3.dump(), {
'id': 'model-3',
'm2': [
'model-2',
],
})
def test_load_full(self):
model = ManyToManyModel2(self.storage, id='model-2')
self.assertEqual(model.dump(), {
'id': 'model-2',
'field': 'model-2',
'm1': [
{
'id': 'model-1',
'field': 'model-1',
'm2': [
'model-2',
],
},
],
})
self.assertEqual(self.storage.open.call_count, 2)
self.storage.open.assert_has_calls([
mock.call('m2m-model-2.json'),
mock.call('m2m-model-1.json')])
def test_load_partial(self):
model = ManyToManyModel1(self.storage, id='model-1')
self.assertEqual(model.dump(), {
'id': 'model-1',
'field': 'model-1',
'm2': [
'model-2',
],
})
self.assertEqual(self.storage.open.call_count, 2)
self.storage.open.assert_has_calls([
mock.call('m2m-model-1.json'),
mock.call('m2m-model-2.json')])
def test_save_field(self):
model1 = ManyToManyModel1(self.storage, id='model-1')
model2 = model1.m2[0]
model1.field = 'changed-field-1'
model2.field = 'changed-field-2'
model2.save()
self.storage.save.assert_called_once_with('m2m-model-2.json', mock.ANY)
self.assertEqual(
self.storage.files['m2m-model-2.json'],
'[\n'
' {\n'
' "field": "changed-field-2", \n'
' "id": "model-2", \n'
' "m1": [\n'
' {\n'
' "field": "model-1", \n'
' "id": "model-1", \n'
' "m2": [\n'
' "model-2"\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
']')
model1.save()
self.assertEqual(self.storage.save.call_count, 3)
self.storage.save.assert_has_calls([
mock.call('m2m-model-2.json', mock.ANY),
mock.call('m2m-model-1.json', mock.ANY),
mock.call('m2m-model-2.json', mock.ANY)])
self.assertEqual(
self.storage.files['m2m-model-1.json'],
'{\n'
' "field": "changed-field-1", \n'
' "id": "model-1", \n'
' "m2": [\n'
' "model-2"\n'
' ]\n'
'}')
self.assertEqual(
self.storage.files['m2m-model-2.json'],
'[\n'
' {\n'
' "field": "changed-field-2", \n'
' "id": "model-2", \n'
' "m1": [\n'
' {\n'
' "field": "changed-field-1", \n'
' "id": "model-1", \n'
' "m2": [\n'
' "model-2"\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
']')
def test_save_id(self):
model1 = ManyToManyModel1(self.storage, id='model-1')
model2 = model1.m2[0]
model1.id = 'changed-id-1'
model2.id = 'changed-id-2'
model2.save()
self.assertEqual(self.storage.save.call_count, 2)
self.storage.save.assert_has_calls([
mock.call('m2m-model-2.json', mock.ANY),
mock.call('m2m-model-1.json', mock.ANY)])
self.assertEqual(
self.storage.files['m2m-model-1.json'],
'{\n'
' "field": "model-1", \n'
' "id": "model-1", \n'
' "m2": [\n'
' "changed-id-2"\n'
' ]\n'
'}')
self.assertEqual(
self.storage.files['m2m-model-2.json'],
'[\n'
' {\n'
' "field": "model-2", \n'
' "id": "changed-id-2", \n'
' "m1": [\n'
' {\n'
' "field": "model-1", \n'
' "id": "model-1", \n'
' "m2": [\n'
' "changed-id-2"\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
']')
model1.save()
self.assertEqual(self.storage.save.call_count, 4)
self.storage.save.assert_has_calls([
mock.call('m2m-model-2.json', mock.ANY),
mock.call('m2m-model-1.json', mock.ANY),
mock.call('m2m-model-1.json', mock.ANY),
mock.call('m2m-model-2.json', mock.ANY)])
self.assertEqual(
self.storage.files['m2m-model-1.json'],
'{\n'
' "field": "model-1", \n'
' "id": "changed-id-1", \n'
' "m2": [\n'
' "changed-id-2"\n'
' ]\n'
'}')
self.assertEqual(
self.storage.files['m2m-model-2.json'],
'[\n'
' {\n'
' "field": "model-2", \n'
' "id": "changed-id-2", \n'
' "m1": [\n'
' {\n'
' "field": "model-1", \n'
' "id": "changed-id-1", \n'
' "m2": [\n'
' "changed-id-2"\n'
' ]\n'
' }\n'
' ]\n'
' }\n'
']')
class PolymorphicRelationshipTests(DataStoreTestCase):
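    """Exercises one-to-many collections whose children are polymorphic (per-child type field in dumps)."""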
def setUp(self):
super(PolymorphicRelationshipTests, self).setUp()
self.storage = mock_storage({
'parents.json':
'{'
' "id": "parent-1",'
' "field": "parent-1",'
' "children": ['
' {'
' "type": "PolymorphicChildModel1",'
' "id": "child-1"'
' },'
' {'
' "_type_": "PolymorphicChildModel2",'
' "id": "child-2"'
' }'
' ]'
'}',
'children.json':
'['
' {'
' "type": "PolymorphicChildModel1",'
' "id": "child-1",'
' "field1": "child-1",'
' "parent": "parent-1"'
' },'
' {'
' "_type_": "PolymorphicChildModel2",'
' "id": "child-2",'
' "field2": "child-2",'
' "parent": "parent-1"'
' }'
']',
})
def test_no_children(self):
parent = PolymorphicParentModel(id='parent-1')
self.assertEqual(len(parent.children), 0)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [],
})
def test_set_children(self):
parent = PolymorphicParentModel(id='parent-1')
child1 = PolymorphicChildModel1(id='child-1', field1='field-1')
child2 = PolymorphicChildModel2(id='child-2', field2='field-2')
parent.children = [child1, child2]
self.assertEqual(child1.parent, parent)
self.assertEqual(child2.parent, parent)
self.assertEqual(len(parent.children), 2)
self.assertEqual(parent.children[0], child1)
self.assertEqual(parent.children[1], child2)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
def test_add_to_children(self):
parent = PolymorphicParentModel(id='parent-1')
child1 = PolymorphicChildModel1(id='child-1', field1='field-1')
child2 = PolymorphicChildModel2(id='child-2', field2='field-2')
parent.children.add(child2)
parent.children.add(child1)
self.assertEqual(child1.parent, parent)
self.assertEqual(child2.parent, parent)
self.assertEqual(len(parent.children), 2)
self.assertEqual(parent.children[0], child2)
self.assertEqual(parent.children[1], child1)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
],
})
def test_set_parent(self):
parent = PolymorphicParentModel(id='parent-1')
child1 = PolymorphicChildModel1(id='child-1', field1='field-1')
child2 = PolymorphicChildModel2(id='child-2', field2='field-2')
child1.parent = parent
child2.parent = parent
self.assertEqual(child1.parent, parent)
self.assertEqual(child2.parent, parent)
self.assertEqual(len(parent.children), 2)
self.assertEqual(parent.children[0], child1)
self.assertEqual(parent.children[1], child2)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
def test_create_with_children(self):
child1 = PolymorphicChildModel1(id='child-1', field1='field-1')
child2 = PolymorphicChildModel2(id='child-2', field2='field-2')
parent = PolymorphicParentModel(id='parent-1',
children=[child1, child2])
self.assertEqual(child1.parent, parent)
self.assertEqual(child2.parent, parent)
self.assertEqual(len(parent.children), 2)
self.assertEqual(parent.children[0], child1)
self.assertEqual(parent.children[1], child2)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
def test_create_with_parent(self):
parent = PolymorphicParentModel(id='parent-1')
child1 = PolymorphicChildModel1(id='child-1', field1='field-1',
parent=parent)
child2 = PolymorphicChildModel2(id='child-2', field2='field-2',
parent=parent)
self.assertEqual(child1.parent, parent)
self.assertEqual(child2.parent, parent)
self.assertEqual(len(parent.children), 2)
self.assertEqual(parent.children[0], child1)
self.assertEqual(parent.children[1], child2)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
def test_change_parent(self):
parent1 = PolymorphicParentModel(id='parent-1')
parent2 = PolymorphicParentModel(id='parent-2')
child1 = PolymorphicChildModel1(id='child-1', field1='field-1',
parent=parent1)
child2 = PolymorphicChildModel2(id='child-2', field2='field-2',
parent=parent1)
self.assertEqual(child1.parent, parent1)
self.assertEqual(child2.parent, parent1)
self.assertEqual(len(parent1.children), 2)
self.assertEqual(len(parent2.children), 0)
child2.parent = parent2
self.assertEqual(child1.parent, parent1)
self.assertEqual(child2.parent, parent2)
self.assertEqual(len(parent1.children), 1)
self.assertEqual(len(parent2.children), 1)
self.assertEqual(parent1.children[0], child1)
self.assertEqual(parent2.children[0], child2)
self.assertEqual(parent1.dump(), {
'id': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
],
})
self.assertEqual(parent2.dump(), {
'id': 'parent-2',
'children': [
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
child1.parent = parent2
self.assertEqual(child1.parent, parent2)
self.assertEqual(child2.parent, parent2)
self.assertEqual(len(parent1.children), 0)
self.assertEqual(len(parent2.children), 2)
self.assertEqual(parent2.children[0], child2)
self.assertEqual(parent2.children[1], child1)
self.assertEqual(parent1.dump(), {
'id': 'parent-1',
'children': [],
})
self.assertEqual(parent2.dump(), {
'id': 'parent-2',
'children': [
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
],
})
def test_change_children(self):
parent = PolymorphicParentModel(id='parent-1')
child1 = PolymorphicChildModel1(id='child-1', field1='field-1',
parent=parent)
child2 = PolymorphicChildModel2(id='child-2', field2='field-2')
self.assertEqual(child1.parent, parent)
self.assertEqual(child2.parent, None)
self.assertEqual(len(parent.children), 1)
parent.children = [child1, child2]
self.assertEqual(child1.parent, parent)
self.assertEqual(child2.parent, parent)
self.assertEqual(len(parent.children), 2)
self.assertEqual(parent.dump(), {
'id': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
def test_getitem(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel2(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2, child3])
self.assertIs(parent.children[0], child1)
self.assertIs(parent.children['child-1'], child1)
self.assertIs(parent.children[child1], child1)
self.assertIs(parent.children[1], child2)
self.assertIs(parent.children['child-2'], child2)
self.assertIs(parent.children[child2], child2)
with self.assertRaises(IndexError):
parent.children[1000]
with self.assertRaises(KeyError):
parent.children['child-4']
self.assertEqual(parent.children[2:], [child3])
def test_get(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel2(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2, child3])
self.assertIs(parent.children.get(0), child1)
self.assertIs(parent.children.get('child-1'), child1)
self.assertIs(parent.children.get(child1), child1)
self.assertIs(parent.children.get(1), child2)
self.assertIs(parent.children.get('child-2'), child2)
self.assertIs(parent.children.get(child2), child2)
self.assertIs(parent.children.get('child-4'), None)
sentinel = object()
self.assertIs(parent.children.get('child-4', default=sentinel), sentinel)
def test_setitem(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel2(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2, child3])
child1b = PolymorphicChildModel1(id='child-1')
child1c = PolymorphicChildModel1(id='child-1')
child1d = PolymorphicChildModel1(id='child-1')
child4 = PolymorphicChildModel2(id='child-4')
child5 = PolymorphicChildModel1(id='child-5')
child6 = PolymorphicChildModel2(id='child-6')
child7 = PolymorphicChildModel1(id='child-7')
parent.children[0] = child1b
self.assertIs(parent.children[0], child1b)
parent.children['child-1'] = child1c
self.assertIs(parent.children[0], child1c)
parent.children[child1] = child1d
self.assertIs(parent.children[0], child1d)
self.assertListEqual(parent.children, [child1d, child2, child3])
parent.children[1:1] = [child4, child5]
self.assertIs(child4.parent, parent)
self.assertIs(child5.parent, parent)
self.assertListEqual(parent.children,
[child1d, child4, child5, child2, child3])
parent.children[:2] = [child6, child7]
self.assertIs(child6.parent, parent)
self.assertIs(child7.parent, parent)
self.assertIs(child1d.parent, None)
self.assertIs(child4.parent, None)
self.assertListEqual(parent.children,
[child6, child7, child5, child2, child3])
with self.assertRaises(ValueError):
parent.children[0:0] = [child2]
def test_delitem(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel2(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
child4 = PolymorphicChildModel2(id='child-4')
child5 = PolymorphicChildModel1(id='child-5')
parent = PolymorphicParentModel(id='parent-1', children=[
child1, child2, child3, child4, child5])
del parent.children[0]
del parent.children['child-3']
del parent.children[child4]
self.assertListEqual(parent.children, [child2, child5])
self.assertIs(child1.parent, None)
self.assertIs(child3.parent, None)
self.assertIs(child4.parent, None)
def test_append(self):
child1 = PolymorphicChildModel1(id='child-1')
child1b = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel1(id='child-2')
child3 = PolymorphicChildModel2(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2])
parent.children.append(child3)
self.assertListEqual(parent.children, [child1, child2, child3])
self.assertIs(child3.parent, parent)
with self.assertRaises(ValueError):
parent.children.append(child1b)
def test_add(self):
child1 = PolymorphicChildModel1(id='child-1')
child1b = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel1(id='child-2')
child3 = PolymorphicChildModel2(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2])
parent.children.add(child3)
self.assertListEqual(parent.children, [child1, child2, child3])
self.assertIs(child3.parent, parent)
parent.children.add(child1b)
self.assertListEqual(parent.children, [child1, child2, child3])
def test_insert(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel1(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child2, child3])
parent.children.insert(0, child1)
self.assertListEqual(parent.children, [child1, child2, child3])
self.assertIs(child1.parent, parent)
def test_remove(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel1(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2, child3])
parent.children.remove(child1)
self.assertListEqual(parent.children, [child2, child3])
self.assertIs(child1.parent, None)
with self.assertRaises(ValueError):
parent.children.remove(child1)
def test_discard(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel1(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2, child3])
parent.children.discard(child1)
self.assertListEqual(parent.children, [child2, child3])
self.assertIs(child1.parent, None)
parent.children.discard(child1)
self.assertListEqual(parent.children, [child2, child3])
def test_pop(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel1(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2, child3])
pop1 = parent.children.pop()
self.assertIs(pop1, child3)
self.assertListEqual(parent.children, [child1, child2])
self.assertIs(child3.parent, None)
pop2 = parent.children.pop('child-1')
self.assertIs(pop2, child1)
self.assertListEqual(parent.children, [child2])
self.assertIs(child1.parent, None)
def test_clear(self):
child1 = PolymorphicChildModel1(id='child-1')
child2 = PolymorphicChildModel1(id='child-2')
child3 = PolymorphicChildModel1(id='child-3')
parent = PolymorphicParentModel(
id='parent-1', children=[child1, child2, child3])
parent.children.clear()
self.assertListEqual(parent.children, [])
self.assertIs(child1.parent, None)
self.assertIs(child2.parent, None)
self.assertIs(child3.parent, None)
def test_load_full(self):
model = PolymorphicParentModel(self.storage, id='parent-1')
self.assertEqual(model.dump(), {
'id': 'parent-1',
'field': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
self.assertEqual(self.storage.open.call_count, 2)
self.storage.open.assert_has_calls([
mock.call('parents.json'),
mock.call('children.json')])
def test_load_partial(self):
model = PolymorphicChildModel1(
self.storage, id='child-1', parent=PolymorphicParentModel(
self.storage, id='parent-1'))
self.assertEqual(model.dump(), {
'type': 'PolymorphicChildModel1',
'id': 'child-1',
'field1': 'child-1',
'parent': 'parent-1',
})
self.assertEqual(model, model.parent.children[0])
self.assertEqual(self.storage.open.call_count, 2)
self.storage.open.assert_has_calls([
mock.call('parents.json'),
mock.call('children.json')])
self.assertEqual(model.parent.dump(), {
'id': 'parent-1',
'field': 'parent-1',
'children': [
{
'type': 'PolymorphicChildModel1',
'id': 'child-1',
},
{
'_type_': 'PolymorphicChildModel2',
'id': 'child-2',
},
],
})
def test_save_field(self):
parent = PolymorphicParentModel(self.storage, id='parent-1')
child = parent.children[0]
child.field1 = 'changed-id-1'
parent.field = 'changed-id-2'
parent.save()
self.assertEqual(self.storage.save.call_count, 1)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY)])
self.assertEqual(
self.storage.files['parents.json'],
'{\n'
' "children": [\n'
' {\n'
' "id": "child-1", \n'
' "type": "PolymorphicChildModel1"\n'
' }, \n'
' {\n'
' "_type_": "PolymorphicChildModel2", \n'
' "id": "child-2"\n'
' }\n'
' ], \n'
' "field": "changed-id-2", \n'
' "id": "parent-1"\n'
'}')
child.save()
self.assertEqual(self.storage.save.call_count, 2)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY),
mock.call('children.json', mock.ANY)])
self.assertEqual(
self.storage.files['children.json'],
'[\n'
' {\n'
' "field1": "changed-id-1", \n'
' "id": "child-1", \n'
' "parent": "parent-1", \n'
' "type": "PolymorphicChildModel1"\n'
' }, \n'
' {\n'
' "_type_": "PolymorphicChildModel2", \n'
' "field2": "child-2", \n'
' "id": "child-2", \n'
' "parent": "parent-1"\n'
' }\n'
']')
def test_save_id(self):
parent = PolymorphicParentModel(self.storage, id='parent-1')
child = parent.children[0]
child.id = 'changed-id-1'
parent.id = 'changed-id-2'
parent.save()
self.assertEqual(self.storage.save.call_count, 2)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY),
mock.call('children.json', mock.ANY)])
self.assertEqual(
self.storage.files['parents.json'],
'{\n'
' "children": [\n'
' {\n'
' "id": "child-1", \n'
' "type": "PolymorphicChildModel1"\n'
' }, \n'
' {\n'
' "_type_": "PolymorphicChildModel2", \n'
' "id": "child-2"\n'
' }\n'
' ], \n'
' "field": "parent-1", \n'
' "id": "changed-id-2"\n'
'}')
self.assertEqual(
self.storage.files['children.json'],
'[\n'
' {\n'
' "field1": "child-1", \n'
' "id": "child-1", \n'
' "parent": "changed-id-2", \n'
' "type": "PolymorphicChildModel1"\n'
' }, \n'
' {\n'
' "_type_": "PolymorphicChildModel2", \n'
' "field2": "child-2", \n'
' "id": "child-2", \n'
' "parent": "changed-id-2"\n'
' }\n'
']')
child.save()
self.assertEqual(self.storage.save.call_count, 4)
self.storage.save.assert_has_calls([
mock.call('parents.json', mock.ANY),
mock.call('children.json', mock.ANY),
mock.call('children.json', mock.ANY),
mock.call('parents.json', mock.ANY)])
self.assertEqual(
self.storage.files['children.json'],
'[\n'
' {\n'
' "field1": "child-1", \n'
' "id": "changed-id-1", \n'
' "parent": "changed-id-2", \n'
' "type": "PolymorphicChildModel1"\n'
' }, \n'
' {\n'
' "_type_": "PolymorphicChildModel2", \n'
' "field2": "child-2", \n'
' "id": "child-2", \n'
' "parent": "changed-id-2"\n'
' }\n'
']')
self.assertEqual(
self.storage.files['parents.json'],
'{\n'
' "children": [\n'
' {\n'
' "id": "changed-id-1", \n'
' "type": "PolymorphicChildModel1"\n'
' }, \n'
' {\n'
' "_type_": "PolymorphicChildModel2", \n'
' "id": "child-2"\n'
' }\n'
' ], \n'
' "field": "parent-1", \n'
' "id": "changed-id-2"\n'
'}')
| [
"[email protected]"
] | |
8848a2025763c8c406ee3f306ba82e82e8db0a70 | bf12e13c0ab5ccf2fc32509b02aaae6b6a2e3327 | /examples/hello_rect.py | 9f1bcbed6deb09a08632a4cee248572761d502d9 | [
"MIT",
"Python-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | HighCWu/tpythonpp | 42b56c9eb3c77192cbda36f0e198707bb858fe38 | f1c15e1101993e4c9c7529739823b47759ea13f7 | refs/heads/master | 2023-06-30T16:17:09.409107 | 2021-03-19T04:16:12 | 2021-03-19T04:16:12 | 391,806,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | def test():
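    # rect appears to be a builtin provided by the tpython++ runtime (no import needed in these examples).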
print('hello world')
r = rect(0,0, 320, 240)
print(r)
print(r.x)
print(r.y)
print(r.width)
print(r.height)
r.x = 2
print(r.x)
r.y += 0.1
print(r.y)
area = r.get_area()
print(area)
test() | [
"[email protected]"
] | |
90f7ab7711d5790e74f9518e25d8c39a79edafd8 | 7b5828edda7751700ca7002b40a214e39e5f48a8 | /EA/simulation/server_commands/service_npc_commands.py | 07e4d57abbcbb8550b95b267c098277d3e6c293a | [] | no_license | daniela-venuta/Sims-4-Python-Script-Workspace | 54c33dac02f84daed66f46b7307f222fede0fa62 | f408b28fb34626b2e3b2953152343d591a328d66 | refs/heads/main | 2023-03-29T18:08:39.202803 | 2021-03-30T19:00:42 | 2021-03-30T19:00:42 | 353,111,243 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,830 | py | from date_and_time import create_time_span
from sims4.commands import CommandType
import services
import sims4.commands
@sims4.commands.Command('service_npc.request_service', command_type=CommandType.Cheat)
def request_service(service_npc_type:str, household_id=None, _connection=None):
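    """Cheat: request the tuned service NPC for a household (defaults to the client's household)."""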
service_npc_tuning = services.service_npc_manager().get(service_npc_type)
if service_npc_tuning is not None:
tgt_client = services.client_manager().get(_connection)
if tgt_client is None:
return False
else:
if household_id is None:
household = tgt_client.household
else:
household_id = int(household_id)
manager = services.household_manager()
household = manager.get(household_id)
if household is None:
household = tgt_client.household
services.current_zone().service_npc_service.request_service(household, service_npc_tuning)
sims4.commands.output('Requesting service {0}'.format(service_npc_type), _connection)
return True
return False
@sims4.commands.Command('service_npc.fake_perform_service')
def fake_perform_service(service_npc_type:str, _connection=None):
service_npc_tuning = services.service_npc_manager().get(service_npc_type)
if service_npc_tuning is not None:
tgt_client = services.client_manager().get(_connection)
if tgt_client is None:
return False
else:
household = tgt_client.household
service_npc_tuning.fake_perform(household)
return True
return False
@sims4.commands.Command('service_npc.cancel_service', command_type=CommandType.Automation)
def cancel_service(service_npc_type:str, max_duration:int=240, _connection=None):
service_npc_tuning = services.service_npc_manager().get(service_npc_type)
if service_npc_tuning is not None:
tgt_client = services.client_manager().get(_connection)
if tgt_client is None:
return False
else:
household = tgt_client.household
services.current_zone().service_npc_service.cancel_service(household, service_npc_tuning)
return True
return False
@sims4.commands.Command('service_npc.toggle_auto_scheduled_services', command_type=CommandType.Automation)
def toggle_auto_scheduled_services(enable:bool=None, max_duration:int=240, _connection=None):
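    """Enable/disable auto-scheduled services; with no explicit argument, flips the current state."""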
service_npc_service = services.current_zone().service_npc_service
enable_auto_scheduled_services = enable if enable is not None else not service_npc_service._auto_scheduled_services_enabled
service_npc_service._auto_scheduled_services_enabled = enable_auto_scheduled_services
return True
| [
"[email protected]"
] | |
7ba1d723327bdcf4aef6f5d70f72674ce22431c7 | 5774101105b47d78adb7a57eefdfa21502bbd70c | /python 语法基础/d14_tkinter_python图形开发界面库/tkinter/3.button控件.py | 82895d1a97556d89642d20d2a44eb01bb5377143 | [] | no_license | zhlthunder/python-study | 34d928f0ebbdcd5543ae0f41baaea955c92f5c56 | 0f25dd5105ba46791842d66babbe4c3a64819ee5 | refs/heads/master | 2023-01-12T18:39:47.184978 | 2018-10-07T23:48:04 | 2018-10-07T23:48:04 | 90,516,611 | 0 | 1 | null | 2022-12-26T19:46:22 | 2017-05-07T07:39:48 | HTML | UTF-8 | Python | false | false | 615 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#author:zhl
import tkinter
def func():
print("zhl is good man")
win=tkinter.Tk()
win.title("zhl")
win.geometry("400x400+200+0")
##text:定义按钮上显示的命名
##command 定义点击按钮触发的函数
##height,width: 设置按钮的宽高
button1=tkinter.Button(win,text="按钮",command=func,width=5,height=5)
button1.pack()
button2=tkinter.Button(win,text="按钮",command=lambda:print("it is button2"),width=5,height=5)
button2.pack()
button3=tkinter.Button(win,text="退出",command=win.quit,width=5,height=5)
button3.pack()
win.mainloop() | [
"[email protected]"
] | |
3537ab717502779be66add592bf5cff21cb46dca | 322e3003cc14c9beb7aa47363ca3c2f6038b82d5 | /lecture6/pyspark/basics.py | b7cd3c6d03c81beae10b26d0f9da81724997ec3c | [] | no_license | danielvachalek/MLOps | 039a393c71a418383ea46338e2d415e7c3936b56 | 0746e0380b73d93b2f12a22df04a74de7daf18a0 | refs/heads/master | 2023-02-09T01:21:53.874657 | 2021-01-02T22:49:32 | 2021-01-02T22:49:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,011 | py | # Databricks notebook source
# MAGIC %md In Cmd 2, the AWS_ACCESS_KEY and AWS_SECRET_KEY variables are set and kept hidden.
# COMMAND ----------
AWS_ACCESS_KEY = "AA"
AWS_SECRET_KEY = "BB"
# COMMAND ----------
sc._jsc.hadoopConfiguration().set("fs.s3n.awsAccessKeyId", AWS_ACCESS_KEY)
sc._jsc.hadoopConfiguration().set("fs.s3n.awsSecretAccessKey", AWS_SECRET_KEY)
# COMMAND ----------
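# u.data holds MovieLens-style ratings (tab-separated; columns include iid and rating).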
df = spark.read.csv("s3://databricks-recsys/u.data",header=True, sep="\t",inferSchema = True)
display(df)
# COMMAND ----------
s3path = "s3://databricks-recsys/"
df.write.parquet(s3path+"u.parquet")
# COMMAND ----------
df_parquet = spark.read.parquet(s3path+"u.parquet")
df_parquet.show()
# COMMAND ----------
pdf = df.toPandas()
# COMMAND ----------
pdf.head()
# COMMAND ----------
sdf = sqlContext.createDataFrame(pdf)
# COMMAND ----------
sdf.describe()
# COMMAND ----------
sdf.printSchema()
# COMMAND ----------
import databricks.koalas as ks
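# Koalas exposes a pandas-like API backed by Spark DataFrames.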
kdf = sdf.to_koalas()
kdf['iid'].to_numpy()[:3]
# COMMAND ----------
type(ks.from_pandas(pdf))
# COMMAND ----------
sdf.createOrReplaceTempView('sdf')
# COMMAND ----------
query = 'select distinct iid from sdf order by iid'
spark.sql(query).show()
# COMMAND ----------
movies_sdf = spark.read.csv("s3://databricks-recsys/movies_raw.dat",header=False, sep="|",inferSchema = True)
display(movies_sdf)
# COMMAND ----------
movies_sdf.createOrReplaceTempView('movies_sdf')
# COMMAND ----------
query = """
select sdf.iid, avg(sdf.rating) as avg_rating, count(sdf.rating) as num_rating, first(movies_sdf._c1) as movie
from sdf,movies_sdf
where sdf.iid = movies_sdf._c0
group by iid
having num_rating >= 5
order by avg_rating desc
limit 10
"""
top_movies_sdf = spark.sql(query)
# COMMAND ----------
top_movies_kdf = top_movies_sdf.to_koalas()
top_movies_kdf.head()
# COMMAND ----------
display(top_movies_sdf)
# COMMAND ----------
sdf_grouped = sdf.groupBy("iid").agg({'rating':'avg'})
pdf_grouped = sdf_grouped.toPandas()
len(pdf_grouped)
| [
"[email protected]"
] | |
79e82a9736205ebba06486b564cb8925c6d74af9 | 8acffb8c4ddca5bfef910e58d3faa0e4de83fce8 | /ml-flask/Lib/site-packages/torch/_utils.py | 55f737a5974002ee337bfdaf7d920c2472a0fe84 | [
"MIT"
] | permissive | YaminiHP/SimilitudeApp | 8cbde52caec3c19d5fa73508fc005f38f79b8418 | 005c59894d8788c97be16ec420c0a43aaec99b80 | refs/heads/master | 2023-06-27T00:03:00.404080 | 2021-07-25T17:51:27 | 2021-07-25T17:51:27 | 389,390,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:03a04fffa1996df6bcab4d2bf79566d0e6b7d661fe5e37b292b5a766d648edfa
size 19786
| [
"[email protected]"
] | |
e554ed1e3f0ef7ab8afac5e92e7db32e3179c2ce | 05a090ee8f9d6dc6bbcc3d20cf8d4a7c8a627bde | /kash/migrations/0003_auto_20201203_1658.py | 9da1a6f5d84271275e7fa8f9401985f96c9af90e | [] | no_license | Komilcoder/kash_app | 527e84c63b03264f72aba4e2d3039a219beae556 | 88ab937c3391b1104bbdbf733da49634ea645ecf | refs/heads/master | 2023-01-21T00:46:24.482017 | 2020-12-03T15:52:01 | 2020-12-03T15:52:01 | 318,185,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,520 | py | # Generated by Django 3.1.4 on 2020-12-03 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kash', '0002_auto_20201203_1648'),
]
operations = [
migrations.RemoveField(
model_name='news',
name='description',
),
migrations.RemoveField(
model_name='news',
name='title',
),
migrations.RemoveField(
model_name='tag',
name='name',
),
migrations.AddField(
model_name='news',
name='description_ru',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='news',
name='description_uz',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='news',
name='title_ru',
field=models.CharField(max_length=250, null=True),
),
migrations.AddField(
model_name='news',
name='title_uz',
field=models.CharField(max_length=250, null=True),
),
migrations.AddField(
model_name='tag',
name='name_ru',
field=models.CharField(max_length=100, null=True),
),
migrations.AddField(
model_name='tag',
name='name_uz',
field=models.CharField(max_length=100, null=True),
),
]
| [
"[email protected]"
] | |
a4aa3cb66427702daeca11f1eba49736ef4dd8e8 | e81576012330e6a6024d14f3e241f88ca34b73cd | /python_code/vnev/Lib/site-packages/jdcloud_sdk/services/cps/models/Listener.py | f4508baf200e067c53fd0aa337482b86f24fb929 | [
"MIT"
] | permissive | Ureimu/weather-robot | eba6a84147755aa83c941a306bac1a7c4e95e23e | 7634195af388538a566ccea9f8a8534c5fb0f4b6 | refs/heads/master | 2021-01-15T07:23:42.274413 | 2020-03-23T02:30:19 | 2020-03-23T02:30:19 | 242,912,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,761 | py | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
class Listener(object):
def __init__(self, listenerId=None, loadBalancerId=None, protocol=None, port=None, algorithm=None, stickySession=None, realIp=None, status=None, name=None, description=None, healthCheck=None, healthCheckTimeout=None, healthCheckInterval=None, healthyThreshold=None, unhealthyThreshold=None, healthCheckIp=None, serverGroupId=None):
"""
:param listenerId: (Optional) 监听器ID
:param loadBalancerId: (Optional) 负载均衡ID
:param protocol: (Optional) 协议
:param port: (Optional) 端口
:param algorithm: (Optional) 调度算法
:param stickySession: (Optional) 会话保持状态,取值on|off
:param realIp: (Optional) 获取真实ip
:param status: (Optional) 状态
:param name: (Optional) 名称
:param description: (Optional) 描述
:param healthCheck: (Optional) 健康检查状态,取值on|off
:param healthCheckTimeout: (Optional) 健康检查响应的最大超时时间,单位s
:param healthCheckInterval: (Optional) 健康检查响应的最大间隔时间,单位s
:param healthyThreshold: (Optional) 健康检查结果为success的阈值
:param unhealthyThreshold: (Optional) 健康检查结果为fail的阈值
:param healthCheckIp: (Optional) 健康检查ip
:param serverGroupId: (Optional) 服务器组id
"""
self.listenerId = listenerId
self.loadBalancerId = loadBalancerId
self.protocol = protocol
self.port = port
self.algorithm = algorithm
self.stickySession = stickySession
self.realIp = realIp
self.status = status
self.name = name
self.description = description
self.healthCheck = healthCheck
self.healthCheckTimeout = healthCheckTimeout
self.healthCheckInterval = healthCheckInterval
self.healthyThreshold = healthyThreshold
self.unhealthyThreshold = unhealthyThreshold
self.healthCheckIp = healthCheckIp
self.serverGroupId = serverGroupId
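
# A minimal usage sketch (illustrative only — this file is auto generated, and
# every field value below is a hypothetical placeholder, not a documented value):
#   listener = Listener(
#       listenerId='lsn-xxxxxxxx',
#       protocol='tcp',
#       port=80,
#       stickySession='on',
#       healthCheck='on',
#       healthCheckTimeout=5,
#       healthCheckInterval=10,
#   )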
| [
"[email protected]"
] | |
d089134e584d9b0d118d8a1e547907c28db88b65 | 9bc9885e9500083afc2cd6be4ff93ee2eb4fbfbb | /neuropower/apps/designtoolbox/migrations/0016_auto_20160907_1914.py | 5dbb860d6d601d9fe11f927ee47970a6189b6b04 | [
"MIT"
] | permissive | jokedurnez/neuropower | 50297af01bef55fe2c01355f038a9d184cde493d | ed8c1cf29d447b41dfbfbc7a8345443454e62a96 | refs/heads/master | 2021-01-15T08:36:45.191330 | 2016-11-20T00:56:30 | 2016-11-20T00:56:30 | 51,338,446 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 930 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-09-07 19:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('designtoolbox', '0015_auto_20160905_1717'),
]
operations = [
migrations.AlterField(
model_name='designmodel',
name='W1',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='designmodel',
name='W2',
field=models.FloatField(default=0.5),
),
migrations.AlterField(
model_name='designmodel',
name='cycles',
field=models.IntegerField(default=100),
),
migrations.AlterField(
model_name='designmodel',
name='preruncycles',
field=models.IntegerField(default=10),
),
]
| [
"[email protected]"
] | |
74e74455de014475d07e51604e15ca1764c43ed9 | 7684ffabb75ed2d6396d3a720c56ed0ee09ee77d | /crack_detection/gcloud/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/oslogin/v1alpha/oslogin_v1alpha_client.py | 17a96db3d16f3b91f1861878024aa4ac72c32e1d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/CrackPropAPI | da7cc7a1ef046d20992423f7c7a148e390bb70e7 | 24c0cfd1f258eeaa7e5e953253b5d778f2fbecb5 | refs/heads/master | 2022-11-09T07:15:41.142453 | 2020-07-02T14:34:15 | 2020-07-02T14:34:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,176 | py | """Generated client library for oslogin version v1alpha."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.oslogin.v1alpha import oslogin_v1alpha_messages as messages
class OsloginV1alpha(base_api.BaseApiClient):
"""Generated client library for service oslogin version v1alpha."""
MESSAGES_MODULE = messages
BASE_URL = u'https://oslogin.googleapis.com/'
MTLS_BASE_URL = u'https://oslogin.mtls.googleapis.com/'
_PACKAGE = u'oslogin'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/compute', u'https://www.googleapis.com/auth/compute.readonly']
_VERSION = u'v1alpha'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = u'google-cloud-sdk'
_CLIENT_CLASS_NAME = u'OsloginV1alpha'
_URL_VERSION = u'v1alpha'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new oslogin handle."""
url = url or self.BASE_URL
super(OsloginV1alpha, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.users_projects = self.UsersProjectsService(self)
self.users_sshPublicKeys = self.UsersSshPublicKeysService(self)
self.users = self.UsersService(self)
class UsersProjectsService(base_api.BaseApiService):
"""Service class for the users_projects resource."""
_NAME = u'users_projects'
def __init__(self, client):
super(OsloginV1alpha.UsersProjectsService, self).__init__(client)
self._upload_configs = {
}
def Delete(self, request, global_params=None):
r"""Deletes a POSIX account.
Args:
request: (OsloginUsersProjectsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha/users/{usersId}/projects/{projectsId}',
http_method=u'DELETE',
method_id=u'oslogin.users.projects.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'operatingSystemType'],
relative_path=u'v1alpha/{+name}',
request_field='',
request_type_name=u'OsloginUsersProjectsDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
class UsersSshPublicKeysService(base_api.BaseApiService):
"""Service class for the users_sshPublicKeys resource."""
_NAME = u'users_sshPublicKeys'
def __init__(self, client):
super(OsloginV1alpha.UsersSshPublicKeysService, self).__init__(client)
self._upload_configs = {
}
def Delete(self, request, global_params=None):
r"""Deletes an SSH public key.
Args:
request: (OsloginUsersSshPublicKeysDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha/users/{usersId}/sshPublicKeys/{sshPublicKeysId}',
http_method=u'DELETE',
method_id=u'oslogin.users.sshPublicKeys.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha/{+name}',
request_field='',
request_type_name=u'OsloginUsersSshPublicKeysDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Retrieves an SSH public key.
Args:
request: (OsloginUsersSshPublicKeysGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SshPublicKey) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha/users/{usersId}/sshPublicKeys/{sshPublicKeysId}',
http_method=u'GET',
method_id=u'oslogin.users.sshPublicKeys.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1alpha/{+name}',
request_field='',
request_type_name=u'OsloginUsersSshPublicKeysGetRequest',
response_type_name=u'SshPublicKey',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates an SSH public key and returns the profile information. This method.
supports patch semantics.
Args:
request: (OsloginUsersSshPublicKeysPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(SshPublicKey) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha/users/{usersId}/sshPublicKeys/{sshPublicKeysId}',
http_method=u'PATCH',
method_id=u'oslogin.users.sshPublicKeys.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'updateMask'],
relative_path=u'v1alpha/{+name}',
request_field=u'sshPublicKey',
request_type_name=u'OsloginUsersSshPublicKeysPatchRequest',
response_type_name=u'SshPublicKey',
supports_download=False,
)
class UsersService(base_api.BaseApiService):
"""Service class for the users resource."""
_NAME = u'users'
def __init__(self, client):
super(OsloginV1alpha.UsersService, self).__init__(client)
self._upload_configs = {
}
def GetLoginProfile(self, request, global_params=None):
r"""Retrieves the profile information used for logging in to a virtual machine.
on Google Compute Engine.
Args:
request: (OsloginUsersGetLoginProfileRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(LoginProfile) The response message.
"""
config = self.GetMethodConfig('GetLoginProfile')
return self._RunMethod(
config, request, global_params=global_params)
GetLoginProfile.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha/users/{usersId}/loginProfile',
http_method=u'GET',
method_id=u'oslogin.users.getLoginProfile',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'operatingSystemType', u'projectId', u'systemId'],
relative_path=u'v1alpha/{+name}/loginProfile',
request_field='',
request_type_name=u'OsloginUsersGetLoginProfileRequest',
response_type_name=u'LoginProfile',
supports_download=False,
)
def ImportSshPublicKey(self, request, global_params=None):
r"""Adds an SSH public key and returns the profile information. Default POSIX.
account information is set when no username and UID exist as part of the
login profile.
Args:
request: (OsloginUsersImportSshPublicKeyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ImportSshPublicKeyResponse) The response message.
"""
config = self.GetMethodConfig('ImportSshPublicKey')
return self._RunMethod(
config, request, global_params=global_params)
ImportSshPublicKey.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1alpha/users/{usersId}:importSshPublicKey',
http_method=u'POST',
method_id=u'oslogin.users.importSshPublicKey',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'projectId'],
relative_path=u'v1alpha/{+parent}:importSshPublicKey',
request_field=u'sshPublicKey',
request_type_name=u'OsloginUsersImportSshPublicKeyRequest',
response_type_name=u'ImportSshPublicKeyResponse',
supports_download=False,
)
| [
"[email protected]"
] | |
36baeb280fe445880f582412b5f140997661f413 | f4f181f2c970a163801b4202fc8d6c92a4e8113d | /google-cloud-sdk/lib/googlecloudsdk/api_lib/compute/addresses_utils.py | c24143684ff0332856f3e3da17ca890a539e8ee5 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | Sorsly/subtle | 7732a6cb910f5e2f4eed1ac0d3b5979001582340 | 718e79a3e04f1f57f39b6ebe90dec9e028e88d40 | refs/heads/master | 2021-05-24T01:21:39.218495 | 2017-10-28T01:33:58 | 2017-10-28T01:33:58 | 83,103,372 | 0 | 1 | MIT | 2020-07-25T11:21:05 | 2017-02-25T03:33:07 | Python | UTF-8 | Python | false | false | 3,225 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common classes and functions for addresses."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import name_generator
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
class AddressesMutator(base_classes.BaseAsyncMutator):
"""Base class for modifying addresses."""
@property
def service(self):
if self.global_request:
return self.compute.globalAddresses
else:
return self.compute.addresses
@property
def resource_type(self):
return 'addresses'
@property
def method(self):
return 'Insert'
def GetAddress(self, args, address, address_ref):
return self.messages.Address(
address=address,
description=args.description,
name=address_ref.Name())
def CreateRequests(self, args):
"""Overrides."""
names, addresses = self._GetNamesAndAddresses(args)
if not args.name:
args.name = names
address_refs = self.ADDRESSES_ARG.ResolveAsResource(
args, self.resources,
scope_lister=compute_flags.GetDefaultScopeLister(
self.compute_client, self.project))
self.global_request = getattr(address_refs[0], 'region', None) is None
requests = []
for address, address_ref in zip(addresses, address_refs):
address_msg = self.GetAddress(
args,
address,
address_ref)
if self.global_request:
requests.append(self.messages.ComputeGlobalAddressesInsertRequest(
address=address_msg, project=address_ref.project))
else:
requests.append(self.messages.ComputeAddressesInsertRequest(
address=address_msg,
region=address_ref.region,
project=address_ref.project))
return requests
def _GetNamesAndAddresses(self, args):
"""Returns names and addresses provided in args."""
if not args.addresses and not args.name:
raise exceptions.ToolException(
'At least one name or address must be provided.')
if args.name:
names = args.name
else:
      # If we don't have any names then we must have some addresses.
names = [name_generator.GenerateRandomName() for _ in args.addresses]
if args.addresses:
addresses = args.addresses
else:
# If we dont have any addresses then we must some names.
addresses = [None] * len(args.name)
if len(addresses) != len(names):
raise exceptions.ToolException(
'If providing both, you must specify the same number of names as '
'addresses.')
return names, addresses
| [
"[email protected]"
] | |
e699cda227ba68d9a3a9122ca69be7e2ae5c1a57 | 67ddedc825a4852349bb3e54f7d31cdeb34c64aa | /contrib/testgen/gen_key_io_test_vectors.py | cfcf70fa73dc3d37f054a9d4084749fe247c87dc | [
"MIT"
] | permissive | geranium-coin/geranium | 3500632ed8e666d30d1b28494b1b7b5003c18ecc | 93c08aa10ea151f4efd8337c1d5599ee7e8d58ea | refs/heads/master | 2022-07-28T21:28:55.717800 | 2022-01-10T17:30:13 | 2022-01-10T17:30:13 | 440,774,432 | 2 | 0 | MIT | 2022-01-04T08:33:10 | 2021-12-22T07:39:53 | C++ | UTF-8 | Python | false | false | 10,266 | py | #!/usr/bin/env python3
# Copyright (c) 2012-2018 The Geranium Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py valid 50 > ../../src/test/data/key_io_valid.json
PYTHONPATH=../../test/functional/test_framework ./gen_key_io_test_vectors.py invalid 50 > ../../src/test/data/key_io_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
from segwit_addr import bech32_encode, decode, convertbits, CHARSET, Encoding
# key types
PUBKEY_ADDRESS = 0
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PUBKEY_ADDRESS_REGTEST = 111
SCRIPT_ADDRESS_REGTEST = 196
PRIVKEY = 128
PRIVKEY_TEST = 239
PRIVKEY_REGTEST = 239
# script
OP_0 = 0x00
OP_1 = 0x51
OP_2 = 0x52
OP_3 = 0x53
OP_16 = 0x60
OP_DUP = 0x76
OP_EQUAL = 0x87
OP_EQUALVERIFY = 0x88
OP_HASH160 = 0xa9
OP_CHECKSIG = 0xac
pubkey_prefix = (OP_DUP, OP_HASH160, 20)
pubkey_suffix = (OP_EQUALVERIFY, OP_CHECKSIG)
script_prefix = (OP_HASH160, 20)
script_suffix = (OP_EQUAL,)
p2wpkh_prefix = (OP_0, 20)
p2wsh_prefix = (OP_0, 32)
p2tr_prefix = (OP_1, 32)
metadata_keys = ['isPrivkey', 'chain', 'isCompressed', 'tryCaseFlip']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata, output_prefix, output_suffix
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, 'main', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS,), 20, (), (False, 'main', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, 'test', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, 'test', None, None), script_prefix, script_suffix),
((PUBKEY_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), pubkey_prefix, pubkey_suffix),
((SCRIPT_ADDRESS_REGTEST,), 20, (), (False, 'regtest', None, None), script_prefix, script_suffix),
((PRIVKEY,), 32, (), (True, 'main', False, None), (), ()),
((PRIVKEY,), 32, (1,), (True, 'main', True, None), (), ()),
((PRIVKEY_TEST,), 32, (), (True, 'test', False, None), (), ()),
((PRIVKEY_TEST,), 32, (1,), (True, 'test', True, None), (), ()),
((PRIVKEY_REGTEST,), 32, (), (True, 'regtest', False, None), (), ()),
((PRIVKEY_REGTEST,), 32, (1,), (True, 'regtest', True, None), (), ())
]
# templates for valid bech32 sequences
bech32_templates = [
# hrp, version, witprog_size, metadata, encoding, output_prefix
('bc', 0, 20, (False, 'main', None, True), Encoding.BECH32, p2wpkh_prefix),
('bc', 0, 32, (False, 'main', None, True), Encoding.BECH32, p2wsh_prefix),
('bc', 1, 32, (False, 'main', None, True), Encoding.BECH32M, p2tr_prefix),
('bc', 2, 2, (False, 'main', None, True), Encoding.BECH32M, (OP_2, 2)),
('tb', 0, 20, (False, 'test', None, True), Encoding.BECH32, p2wpkh_prefix),
('tb', 0, 32, (False, 'test', None, True), Encoding.BECH32, p2wsh_prefix),
('tb', 1, 32, (False, 'test', None, True), Encoding.BECH32M, p2tr_prefix),
('tb', 3, 16, (False, 'test', None, True), Encoding.BECH32M, (OP_3, 16)),
('bcrt', 0, 20, (False, 'regtest', None, True), Encoding.BECH32, p2wpkh_prefix),
('bcrt', 0, 32, (False, 'regtest', None, True), Encoding.BECH32, p2wsh_prefix),
('bcrt', 1, 32, (False, 'regtest', None, True), Encoding.BECH32M, p2tr_prefix),
('bcrt', 16, 40, (False, 'regtest', None, True), Encoding.BECH32M, (OP_16, 40))
]
# templates for invalid bech32 sequences
bech32_ng_templates = [
# hrp, version, witprog_size, encoding, invalid_bech32, invalid_checksum, invalid_char
('tc', 0, 20, Encoding.BECH32, False, False, False),
('bt', 1, 32, Encoding.BECH32M, False, False, False),
('tb', 17, 32, Encoding.BECH32M, False, False, False),
('bcrt', 3, 1, Encoding.BECH32M, False, False, False),
('bc', 15, 41, Encoding.BECH32M, False, False, False),
('tb', 0, 16, Encoding.BECH32, False, False, False),
('bcrt', 0, 32, Encoding.BECH32, True, False, False),
('bc', 0, 16, Encoding.BECH32, True, False, False),
('tb', 0, 32, Encoding.BECH32, False, True, False),
('bcrt', 0, 20, Encoding.BECH32, False, False, True),
('bc', 0, 20, Encoding.BECH32M, False, False, False),
('tb', 0, 32, Encoding.BECH32M, False, False, False),
('bcrt', 0, 20, Encoding.BECH32M, False, False, False),
('bc', 1, 32, Encoding.BECH32, False, False, False),
('tb', 2, 16, Encoding.BECH32, False, False, False),
('bcrt', 16, 20, Encoding.BECH32, False, False, False),
]
def is_valid(v):
'''Check vector v for validity'''
if len(set(v) - set(b58chars)) > 0:
return is_valid_bech32(v)
result = b58decode_chk(v)
if result is None:
return is_valid_bech32(v)
for template in templates:
prefix = bytearray(template[0])
suffix = bytearray(template[2])
if result.startswith(prefix) and result.endswith(suffix):
if (len(result) - len(prefix) - len(suffix)) == template[1]:
return True
return is_valid_bech32(v)
def is_valid_bech32(v):
'''Check vector v for bech32 validity'''
for hrp in ['bc', 'tb', 'bcrt']:
if decode(hrp, v) != (None, None):
return True
return False
def gen_valid_base58_vector(template):
'''Generate valid base58 vector'''
prefix = bytearray(template[0])
payload = bytearray(os.urandom(template[1]))
suffix = bytearray(template[2])
dst_prefix = bytearray(template[4])
dst_suffix = bytearray(template[5])
rv = b58encode_chk(prefix + payload + suffix)
return rv, dst_prefix + payload + dst_suffix
def gen_valid_bech32_vector(template):
'''Generate valid bech32 vector'''
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
encoding = template[4]
dst_prefix = bytearray(template[5])
rv = bech32_encode(encoding, hrp, [witver] + convertbits(witprog, 8, 5))
return rv, dst_prefix + witprog
def gen_valid_vectors():
'''Generate valid test vectors'''
glist = [gen_valid_base58_vector, gen_valid_bech32_vector]
tlist = [templates, bech32_templates]
while True:
for template, valid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
rv, payload = valid_vector_generator(template)
assert is_valid(rv)
metadata = {x: y for x, y in zip(metadata_keys,template[3]) if y is not None}
hexrepr = b2a_hex(payload)
if isinstance(hexrepr, bytes):
hexrepr = hexrepr.decode('utf8')
yield (rv, hexrepr, metadata)
def gen_invalid_base58_vector(template):
'''Generate possibly invalid vector'''
# kinds of invalid vectors:
# invalid prefix
# invalid payload length
# invalid (randomized) suffix (add random data)
# corrupt checksum
corrupt_prefix = randbool(0.2)
randomize_payload_size = randbool(0.2)
corrupt_suffix = randbool(0.2)
if corrupt_prefix:
prefix = os.urandom(1)
else:
prefix = bytearray(template[0])
if randomize_payload_size:
payload = os.urandom(max(int(random.expovariate(0.5)), 50))
else:
payload = os.urandom(template[1])
if corrupt_suffix:
suffix = os.urandom(len(template[2]))
else:
suffix = bytearray(template[2])
val = b58encode_chk(prefix + payload + suffix)
if random.randint(0,10)<1: # line corruption
if randbool(): # add random character to end
val += random.choice(b58chars)
else: # replace random character in the middle
n = random.randint(0, len(val))
val = val[0:n] + random.choice(b58chars) + val[n+1:]
return val
def gen_invalid_bech32_vector(template):
'''Generate possibly invalid bech32 vector'''
no_data = randbool(0.1)
to_upper = randbool(0.1)
hrp = template[0]
witver = template[1]
witprog = bytearray(os.urandom(template[2]))
encoding = template[3]
if no_data:
rv = bech32_encode(encoding, hrp, [])
else:
data = [witver] + convertbits(witprog, 8, 5)
if template[4] and not no_data:
if template[2] % 5 in {2, 4}:
data[-1] |= 1
else:
data.append(0)
rv = bech32_encode(encoding, hrp, data)
if template[5]:
i = len(rv) - random.randrange(1, 7)
rv = rv[:i] + random.choice(CHARSET.replace(rv[i], '')) + rv[i + 1:]
if template[6]:
i = len(hrp) + 1 + random.randrange(0, len(rv) - len(hrp) - 4)
rv = rv[:i] + rv[i:i + 4].upper() + rv[i + 4:]
if to_upper:
rv = rv.swapcase()
return rv
def randbool(p = 0.5):
'''Return True with P(p)'''
return random.random() < p
def gen_invalid_vectors():
'''Generate invalid test vectors'''
# start with some manual edge-cases
yield "",
yield "x",
glist = [gen_invalid_base58_vector, gen_invalid_bech32_vector]
tlist = [templates, bech32_ng_templates]
while True:
for template, invalid_vector_generator in [(t, g) for g, l in zip(glist, tlist) for t in l]:
val = invalid_vector_generator(template)
if not is_valid(val):
yield val,
if __name__ == '__main__':
import sys
import json
iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
try:
uiter = iters[sys.argv[1]]
except IndexError:
uiter = gen_valid_vectors
try:
count = int(sys.argv[2])
except IndexError:
count = 0
data = list(islice(uiter(), count))
json.dump(data, sys.stdout, sort_keys=True, indent=4)
sys.stdout.write('\n')
| [
"[email protected]"
] | |
10ef50de6e155b9bb542e5c845172fff8a2bb9e6 | 16e26614611ae87de81388b435d88b142ca6189e | /pywind/decc/Report.py | d66f3fe5a42edfea4de68937f5228223d475ef93 | [] | no_license | tomwadley/pywind | d1d36007b0196730cba1389ef7940dd0ccabe5df | 0d86ff1c9a67b2f446e62c1471257e38bdc1d03c | refs/heads/master | 2021-04-09T15:41:49.823115 | 2013-06-26T09:18:24 | 2013-06-26T09:18:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,726 | py | from cookielib import CookieJar
import csv
from datetime import datetime
import urllib2
from pywind.decc.geo import osGridToLatLong, LatLon
def field_to_attr(fld):
fld = fld.lower()
for c in [' ', '-', '/']:
fld = fld.replace(c, '_')
return fld
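# e.g. field_to_attr('Installed Capacity (Elec)') -> 'installed_capacity_(elec)'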
class DeccRecord(object):
FIELDS = ['Reference',
'NFFO/SRO/NI-NFFO/Non-NFFO',
'General Technology',
'Technology Type',
'Section 36',
'Contractor (/Applicant)',
'Site Name',
'Installed Capacity (Elec)',
'CHP',
'OffShore Wind Round',
'Address 1',
'Address 2',
'Address 3',
'Address 4',
'Town',
'County',
'District',
'Region',
'Country',
'X Coord',
'Y Coord',
'Pre-consent',
'Post-consent',
'Application Submitted',
'Application Determined',
'Construction Date',
'Planning Officer Recommendation',
'Appeal Determined',
'Appeal Ref Number',
'Date on which generation commenced',
'Green Belt',
'National Park',
'AONB',
'Heritage Coast',
'Special Landscape Area',
'Employment Use',
'Natural Environment',
'Other Land Use',
'Built Heritage/ Archaeology',
'Project Specific',
'Relevant Supporting Details',
'Developer Last Contacted',
'LPA / CC Last Contacted',
'LPA Name',
'Record Last Updated'
]
DATE_FIELDS = ['record_last_updated',
'application_submitted',
'application_determined',
'appeal_determined'
]
BOOLEAN_FIELDS = ['section_36',
'green_belt',
'national_park',
'aonb',
'heritage_coast',
'special_landscape_area',
'employment_use',
'natural_environment',
'other_land_use',
'built_heritage__archaeology',
'project_specific'
]
INT_FIELDS = ['x_coord', 'y_coord']
def __init__(self, row):
for i in range(len(self.FIELDS)):
attr = field_to_attr(self.FIELDS[i])
setattr(self, attr, row[i])
for f in self.BOOLEAN_FIELDS:
val = getattr(self, f, None)
if val is None:
continue
setattr(self, f, False if val.lower() == 'false' else True)
for f in self.DATE_FIELDS:
val = getattr(self, f, None)
if val is None:
continue
if val == '':
setattr(self, f, None)
else:
setattr(self, f, datetime.strptime(val, "%Y-%m-%d").date())
for f in self.INT_FIELDS:
val = getattr(self, f, 0)
if val == '':
val = 0
setattr(self, f, float(val))
mw_capacity = getattr(self, 'installed_capacity_(elec)', 0)
mw_capacity = float(mw_capacity.replace(',', ''))
setattr(self, 'installed_capacity_(elec)', mw_capacity * 1000)
setattr(self, 'capacity', getattr(self, 'installed_capacity_(elec)'))
# Convert x,y to lat/lon
latlon = osGridToLatLong(int(self.x_coord), self.y_coord)
latlon.convert(LatLon.WGS84)
setattr(self, 'lat', latlon.lat)
setattr(self, 'lon', latlon.lon)
def Dump(self):
for f in self.FIELDS:
print "%-30s: %s" % (f, getattr(self, field_to_attr(f), ''))
class MonthlyExtract(object):
URL = "https://restats.decc.gov.uk/app/reporting/decc/monthlyextract/style/csv/csvwhich/reporting.decc.monthlyextract"
def __init__(self):
self.cookieJar = CookieJar()
cookie_handler = urllib2.HTTPCookieProcessor(self.cookieJar)
httpsHandler = urllib2.HTTPSHandler(debuglevel = 0)
self.opener = urllib2.build_opener(cookie_handler, httpsHandler)
self.records = []
def __len__(self):
return len(self.records)
def get_data(self):
resp = self.opener.open(self.URL)
if resp.code != 200:
return False
reader = csv.reader(resp)
for row in reader:
if row[0] == 'Reference':
continue
d = DeccRecord(row)
self.records.append(d)
return True
| [
"[email protected]"
] | |
47005a3669df2d29e09979c2bfd2bb18fede9e59 | b7e52aeabebf7448e31723d406755809cac63099 | /source/calc_fsun_tree/SConstruct | 9e270b942ba10dbe21a51f6c85d05cb22f763ba5 | [
"BSD-3-Clause"
] | permissive | bucricket/projectMASviirs | df31af86e024499ff87d2c2b707e3b9d24813f7c | 705abc89505122351f0ef78e0edb950b7e3b7f48 | refs/heads/master | 2021-01-01T18:31:16.748864 | 2018-05-30T15:14:07 | 2018-05-30T15:14:07 | 98,354,619 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,013 | #!python
import os
import platform
import subprocess
AddOption('--prefix',
dest='prefix',
type='string',
nargs=1,
action='store',
metavar='DIR',
help='installation prefix')
env = Environment(PREFIX = GetOption('prefix'))
prefix = os.environ.get('PREFIX')
base1 = os.path.abspath(os.path.join(prefix,os.pardir))
base = os.path.join(base1,'work')
sourcePath = os.path.join(base,'source')
binPath = os.path.join(prefix,'bin')
# Comment lines start with the # symbol
# The following sets up a compile Environment object with gfortran as the linker.
env = Environment(LINK='gfortran')
env.Append(F90FLAGS = ['-ffree-line-length-512'])
# The next line of code is an array of the source file names used in the program.
# The next line is the actual code that links the executable. env.Program generates an executable.
make_csv = env.Program(target='make_csv', source= 'make_csv.f90')
env.Install(binPath, make_csv)
env.Alias('install', binPath)
| [
"[email protected]"
] | ||
030faf212e0c96085fe19ef5907653e0f6de769f | cc6e36ce306a46c1accc3e979362de34b6063b7e | /game/management/commands/import_games.py | 1be5196a35dfa7b39b0710727bda8fd09f034eca | [] | no_license | bartromgens/petanque-stats-server | d51995e2b4d288a0a99563347c3bf3db863918bf | 9f7e48a7670b1c2c89f1bfcb2ac5ed8c8e9a7fe0 | refs/heads/master | 2020-03-22T19:23:18.230361 | 2018-07-29T00:46:02 | 2018-07-29T00:46:02 | 140,524,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,058 | py | import json
import uuid
from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from django.db import transaction
from game.models import Game, Team, ScoreTeam, Player
class Command(BaseCommand):
help = 'Import scores from a custom format'
players_filepath = 'data/players.json'
games_filepath = 'data/games.json'
@transaction.atomic
def handle(self, *args, **options):
print('BEGIN: import games')
# Command.create_admin()
Command.create_users()
games = Command.create_games()
for game in games:
print(game)
print('END: import games')
@staticmethod
def create_admin():
User.objects.create_superuser(
username='admin',
email='[email protected]',
password='admin'
)
@staticmethod
def create_users():
with open(Command.players_filepath, 'r') as players_file:
players_json = json.load(players_file)
for username in players_json['players']:
User.objects.create_user(
username=username,
email=username + '@test.com',
password=uuid.uuid4().hex[:10]
)
@staticmethod
def create_games():
games = []
with open(Command.games_filepath, 'r') as players_file:
games_json = json.load(players_file)
for game_data in games_json['games']:
games.append(Command.create_game(game_data))
return games
@staticmethod
def create_game(game_data):
game = Game.objects.create(max_score=game_data['max_score'])
for score in game_data['scores']:
team_players_ids = []
for name in score['players']:
team_players_ids.append(Player.get_by_name(name).id)
team = Team.get_or_create_team(team_players_ids)
game.teams.add(team)
ScoreTeam.objects.create(team=team, game=game, score=score['score'])
game.save()
return game
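
# Expected input layout, inferred from the parsing above (values illustrative):
#   players.json: {"players": ["alice", "bob", ...]}
#   games.json:   {"games": [{"max_score": 13,
#                             "scores": [{"players": ["alice"], "score": 13},
#                                        {"players": ["bob"], "score": 7}]}]}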
| [
"[email protected]"
] | |
6d25cc07becb9e59f730a67748abcca1e17b92d4 | 770f7b7155c33d2f8c27846b93b9b73db45b2e2a | /gofedinfra/system/plugins/simpleetcdstorage/fakeartefactdriver.py | 10293902c919cd5c9cb74e6cf1c783345335fd8e | [] | no_license | gofed/infra | b0f6186486e8aa7c8c640411ee92d6648cbc77ec | 2f402bbdf1e5fa7cb68262cc3408a2fc1436269f | refs/heads/master | 2022-10-16T02:46:09.226939 | 2018-06-07T23:16:44 | 2018-06-08T11:31:37 | 48,703,326 | 1 | 5 | null | 2022-10-11T11:17:16 | 2015-12-28T17:08:28 | Python | UTF-8 | Python | false | false | 346 | py | from .artefactdriver import ArtefactDriver
class FakeArtefactDriver(ArtefactDriver):
def __init__(self, artefact):
ArtefactDriver.__init__(self, artefact)
self.data = {}
def store(self, input):
key = self._generateKey(input)
self.data[key] = input
def retrieve(self, data):
key = self._generateKey(data)
return self.data[key]
| [
"[email protected]"
] | |
742e69c1a22297de8f0a8cd58cecab3389d6f888 | a281d09ed91914b134028c3a9f11f0beb69a9089 | /tests/integration/docusaurus/connecting_to_your_data/cloud/gcs/pandas/inferred_and_runtime_yaml_example.py | e7ed0539f4232487def269d75f60499c9e167e07 | [
"Apache-2.0"
] | permissive | CarstenFrommhold/great_expectations | 4e67bbf43d21bc414f56d576704259a4eca283a5 | 23d61c5ed26689d6ff9cec647cc35712ad744559 | refs/heads/develop | 2023-01-08T10:01:12.074165 | 2022-11-29T18:50:18 | 2022-11-29T18:50:18 | 311,708,429 | 0 | 0 | Apache-2.0 | 2020-11-10T15:52:05 | 2020-11-10T15:52:04 | null | UTF-8 | Python | false | false | 4,209 | py | from typing import List
# <snippet>
from ruamel import yaml
import great_expectations as ge
from great_expectations.core.batch import Batch, BatchRequest, RuntimeBatchRequest
# </snippet>
# <snippet>
context = ge.get_context()
# </snippet>
# <snippet>
datasource_yaml = rf"""
name: my_gcs_datasource
class_name: Datasource
execution_engine:
class_name: PandasExecutionEngine
data_connectors:
default_runtime_data_connector_name:
class_name: RuntimeDataConnector
batch_identifiers:
- default_identifier_name
default_inferred_data_connector_name:
class_name: InferredAssetGCSDataConnector
bucket_or_name: <YOUR_GCS_BUCKET_HERE>
prefix: <BUCKET_PATH_TO_DATA>
default_regex:
pattern: (.*)\.csv
group_names:
- data_asset_name
"""
# </snippet>
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the yaml above.
datasource_yaml = datasource_yaml.replace("<YOUR_GCS_BUCKET_HERE>", "test_docs_data")
datasource_yaml = datasource_yaml.replace(
"<BUCKET_PATH_TO_DATA>", "data/taxi_yellow_tripdata_samples"
)
context.test_yaml_config(datasource_yaml)
# <snippet>
context.add_datasource(**yaml.load(datasource_yaml))
# </snippet>
# Here is a RuntimeBatchRequest using a path to a single CSV file
# <snippet>
batch_request = RuntimeBatchRequest(
datasource_name="my_gcs_datasource",
data_connector_name="default_runtime_data_connector_name",
data_asset_name="<YOUR_MEANGINGFUL_NAME>", # this can be anything that identifies this data_asset for you
runtime_parameters={"path": "<PATH_TO_YOUR_DATA_HERE>"}, # Add your GCS path here.
batch_identifiers={"default_identifier_name": "default_identifier"},
)
# </snippet>
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your path directly in the BatchRequest above.
batch_request.runtime_parameters[
"path"
] = f"gs://test_docs_data/data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-01.csv"
# <snippet>
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# </snippet>
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
batch_list: List[Batch] = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
batch: Batch = batch_list[0]
assert batch.data.dataframe.shape[0] == 10000
# Here is a BatchRequest naming a data_asset
# <snippet>
batch_request = BatchRequest(
datasource_name="my_gcs_datasource",
data_connector_name="default_inferred_data_connector_name",
data_asset_name="<YOUR_DATA_ASSET_NAME>",
)
# </snippet>
# Please note this override is only to provide good UX for docs and tests.
# In normal usage you'd set your data asset name directly in the BatchRequest above.
batch_request.data_asset_name = (
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-01"
)
context.create_expectation_suite(
expectation_suite_name="test_suite", overwrite_existing=True
)
validator = context.get_validator(
batch_request=batch_request, expectation_suite_name="test_suite"
)
print(validator.head())
# NOTE: The following code is only for testing and can be ignored by users.
assert isinstance(validator, ge.validator.validator.Validator)
assert [ds["name"] for ds in context.list_datasources()] == ["my_gcs_datasource"]
assert set(
context.get_available_data_asset_names()["my_gcs_datasource"][
"default_inferred_data_connector_name"
]
) == {
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-01",
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-02",
"data/taxi_yellow_tripdata_samples/yellow_tripdata_sample_2019-03",
}
batch_list: List[Batch] = context.get_batch_list(batch_request=batch_request)
assert len(batch_list) == 1
batch: Batch = batch_list[0]
assert batch.data.dataframe.shape[0] == 10000
| [
"[email protected]"
] | |
f6b23ffa695566bdecd4607042fa580e550c11b6 | e25b0dc781cc291da63e9af1319666cb3e3fd94b | /Real_exp2/Codes/preprocess_real_data_classes.py | 9158d1c83f17bf31c9c8a6ae3787751cedef4e7e | [] | no_license | nastaran75/JMLR-Reg | 2621e123a847e0d72bccd5938ab8f06d3aa875c1 | 0daff201b4a462817f7f0caef2a87c46706cb612 | refs/heads/master | 2021-02-04T01:50:19.367050 | 2020-03-28T08:22:09 | 2020-03-28T08:22:09 | 243,599,352 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,283 | py | import sys
from os import listdir
from os.path import isfile, join
import numpy.random as rand
import math
import codecs
import csv
import random
#import fasttext
from myutil import *
import numpy as np
import numpy.linalg as LA
from scipy.io import arff
import shutil
#from PIL import Image
# from sklearn.preprocessing import StandardScaler
# from sklearn.decomposition import PCA
#from real_data_classes import *
def check_sc(data_file):
data=load_data( data_file)
# n,dim = data['X'].shape
# for feature in range( dim ):
# data['X'][:,feature] = np.true_divide( data['X'][:,feature], LA.norm( data['X'][:,feature].flatten() ) )
# save( data, data_file)
d_mat = data['dist_mat']
save( d_mat, data_file + '_dist_mat')
del data['dist_mat']
save( data, data_file )
return
def map_y(arr):
return np.array([ x*(float(2)/3) + float(1/3) for x in arr ])
# return
# data=load_data( data_file )
# data['Y'] = map_y( data['Y'])
# data['test']['Y'] = map_y( data['test']['Y'])
# save( data, data_file)
# print np.unique(data['Y'])
# print np.average(data['c']['0.0'].flatten())
# x_norm = LA.norm(data['X'],axis=1)
# a = np.array( [ LA.norm(row)**2 for row in data['X'] ])
# print np.max( a)
return
# data= load_data( data_file)
# print data['0.5']['low'].shape
# plt.plot(data['0.1']['low'])
# plt.show()
# return
# print data['c']['0.001'].shape
# return
x_norm = LA.norm(data['X'],axis=1)
plt.plot( x_norm )
plt.show()
def check_gaussian():
n=100
m=10
std = float( sys.argv[1])
p=float( sys.argv[2])
x = rand.normal(0,std,100)
plt.plot( x**2 , label='continuous')
c=[]
for sample in range(n):
sum = 0
for i in range(m):
x = np.random.uniform(0,1)
if x < p:
sum += 0.25
c.append(float(sum)/m)
plt.plot( c, label = 'discrete')
plt.legend()
plt.grid()
# plt.ylim([0,.5])
plt.show()
def plot_range_of_lambda( data_file):
lamb = float( sys.argv[1])
# def lower_bound_lambda( c,y,x_m):
# l_gamma = float(c)/(y**2)
# print l_gamma
# return l_gamma*x_m / (1-l_gamma)
data= load_data( data_file )
gamma_lower_bound = np.array( [ data['c']['0.5'][i]/float( data['Y'][i]**2 ) for i in range( data['X'].shape[0] ) ] )
gamma_upper_bound = lamb /( lamb + np.max( LA.norm( data['X'], axis = 1 ).flatten() )**2 )
plt.plot( gamma_lower_bound, label = 'gamma lower bound')
plt.plot( gamma_upper_bound* np.ones( data['X'].shape[0] ) , label = 'gamma upper bound')
print np.max( LA.norm( data['X'], axis = 1 ).flatten() )**2
plt.legend()
plt.show()
class Generate_human_error:
def __init__(self, data_file):
# print data_file
self.data = load_data( data_file )
if 'c' in self.data:
del self.data['c']
del self.data['test']['c']
self.n, self.dim = self.data['X'].shape
# sc = StandardScaler()
# self.data['X'] = sc.fit_transform(self.data['X'])
# self.data['test']['X'] = sc.transform( self.data['test']['X'])
def normalize_features(self, delta = .0001 ):
n,dim = self.data['X'].shape
for feature in range( dim ):
self.data['X'][:,feature] = np.true_divide( self.data['X'][:,feature], LA.norm( self.data['X'][:,feature].flatten() ) )
self.data['test']['X'][:,feature] = np.true_divide( self.data['test']['X'][:,feature], LA.norm( self.data['test']['X'][:,feature].flatten() ) )
# print np.max( [ LA.norm(x.flatten()) for x in self.data['X']] )
# self.data['Y']=np.array([ y if y > 0 else delta for y in self.data['Y']])
# self.data['test']['Y']=np.array([ y if y > 0 else delta for y in self.data['test']['Y']])
def white_Gauss(self, std=1, n=1 , upper_bound = False, y_vec = None ):
init_noise = rand.normal(0,std,n)
if upper_bound :
return np.array( [ noise if noise/y < 0.3 else 0.1*y for noise,y in zip(init_noise, y_vec) ])
else:
return init_noise
def data_independent_noise( self, list_of_std, upper_bound = False ):
self.data['c'] = {}
self.data['test']['c']={}
for std in list_of_std:
self.data['c'][str(std)] = self.white_Gauss( std, self.data['Y'].shape[0], upper_bound , self.data['Y'] ) ** 2
self.data['test']['c'][str(std)] = self.white_Gauss( std, self.data['test']['Y'].shape[0], upper_bound, self.data['test']['Y']) ** 2
def variable_std_Gauss( self, std_const ,x ):
n = x.shape[0]
x_norm = LA.norm( x, axis=1 ).flatten()
std_vector = std_const * np.reciprocal( x_norm )
# print 'rnd shape ', rand.normal( 0, 2 , 1 ).shape
tmp = np.array( [ rand.normal( 0, s ,1)[0] for s in std_vector ])
# print 'tmp.shape', tmp.shape
return tmp
def data_dependent_noise( self, list_of_std ):
self.data['c'] = {}
self.data['test']['c']={}
for std in list_of_std:
self.data['c'][str(std)] = self.variable_std_Gauss( std, self.data['X']) ** 2
self.data['test']['c'][str(std)] = self.variable_std_Gauss( std, self.data['test']['X']) ** 2
def modify_y_values( self ):
def get_num_category( y, y_t):
y = np.concatenate(( y.flatten(), y_t.flatten() ), axis = 0 )
return np.unique( y ).shape[0]
def map_range(v, l, h, l_new, h_new):
# print '****'
# print v
# tmp = float(v-l)*(( h_new - l_new)/float( h-l))+ l_new
# print tmp
# return tmp
return float(v-l)*(( h_new - l_new)/float( h-l))+ l_new
num_cat = get_num_category( self.data['Y'], self.data['test']['Y'])
print num_cat
self.data['Y'] = np.array( [ map_range(i, 0, 1, float(1)/num_cat, 1 ) for i in self.data['Y']]).flatten()
self.data['test']['Y'] = np.array( [ map_range(i, 0, 1, float(1)/num_cat, 1 ) for i in self.data['test']['Y']]).flatten()
def get_discrete_noise( self, p , num_cat):
m=10
c=[]
for sample in range( self.n ):
sum = 0
for i in range(m):
x = np.random.uniform(0,1)
if x < p:
sum += (float(1)/num_cat)**2
c.append(float(sum)/m)
return np.array(c)
def discrete_noise( self, list_of_p ):
def get_num_category( y, y_t):
y = np.concatenate(( y.flatten(), y_t.flatten() ), axis = 0 )
return np.unique( y ).shape[0]
num_cat = get_num_category( self.data['Y'], self.data['test']['Y'] )
if 'c' not in self.data:
self.data['c'] = {}
if 'c' not in self.data['test']:
self.data['test']['c']={}
for p in list_of_p:
self.data['c'][str(p)] = self.get_discrete_noise( p, num_cat )
self.data['test']['c'][str(p)] = self.get_discrete_noise( p, num_cat )
def vary_discrete( self, list_of_frac):
def get_num_category( y, y_t):
y = np.concatenate(( y.flatten(), y_t.flatten() ), axis = 0 )
return np.unique( y ).shape[0]
def nearest( i ):
return np.argmin( self.data['dist_mat'][i])
self.normalize_features()
num_cat = get_num_category( self.data['Y'], self.data['test']['Y'])
print 'num_category', num_cat
n=self.data['X'].shape[0]
indices = np.arange( n )
random.shuffle(indices)
# err = (( float(1)/num_cat )**2 )/20
#print self.data['Y']prop={'size': 15},
self.data['low']={}
self.data['c']={}
self.data['test']['c']={}
for frac in list_of_frac:
num_low = int(frac*n)
self.data['low'][str(frac)]=indices[:num_low]
# self.data['c'][str(frac)] = np.array( [ 0.001 if i in self.data['low'][str(frac)] else 0.08 for i in range(n) ] )
# self.data['test']['c'][str(frac)] = np.array( [ 0.001 if nearest(i) in self.data['low'][str(frac)] else 0.15 for i in range( self.data['test']['X'].shape[0]) ] )
# for stare 11 for messidor 0.001
# self.data['c'][str(frac)] = np.array(
# [0.0008 if i in self.data['low'][str(frac)] else 0.08 for i in range(n)])
# self.data['test']['c'][str(frac)] = np.array(
# [0.0008 if nearest(i) in self.data['low'][str(frac)] else 0.25 for i in
# range(self.data['test']['X'].shape[0])])
# messidor final
# self.data['c'][str(frac)] = np.array(
# [0.0001 if i in self.data['low'][str(frac)] else 0.4 for i in range(n)])
# self.data['test']['c'][str(frac)] = np.array(
# [0.0001 if nearest(i) in self.data['low'][str(frac)] else 0.4 for i in
# range(self.data['test']['X'].shape[0])])
#stare11 final
# self.data['c'][str(frac)] = np.array(
# [0.0001 if i in self.data['low'][str(frac)] else 0.1 for i in range(n)])
# self.data['test']['c'][str(frac)] = np.array(
# [0.0001 if nearest(i) in self.data['low'][str(frac)] else 0.25 for i in
# range(self.data['test']['X'].shape[0])])
# stare5 final
self.data['c'][str(frac)] = np.array(
[0.0001 if i in self.data['low'][str(frac)] else 0.1 for i in range(n)])
self.data['test']['c'][str(frac)] = np.array(
[0.0001 if nearest(i) in self.data['low'][str(frac)] else 0.1 for i in
range(self.data['test']['X'].shape[0])])
def save_data(self, data_file):
save( self.data , data_file)
def generate_human_error( path, file_name_list):
option ='vary_discrete'
list_of_std = [0.2, 0.4, 0.6, 0.8]
for file_name in file_name_list:
data_file = path + 'data/' + file_name +'_pca50'
obj = Generate_human_error( data_file )
obj.vary_discrete( list_of_std )
obj.save_data( path + 'data/' + file_name + '_pca50' )
def compute_dist_dict( data_file ):
data = load_data( data_file)
num_test = data['test']['X'].shape[0]
num_train = data['X'].shape[0]
data['dist_mat']=np.zeros((num_test,num_train))
for te,i in zip(data['test']['X'], range(num_test)):
for tr,j in zip(data['X'], range(num_train)):
data['dist_mat'][i,j]=LA.norm( te-tr)
save( data, data_file )
return
# save( data['dist_mat'] , data_file + '_dist_mat')
# dist_dict = {}
# for i, dist_arr in zip(range( num_test), data['dist_mat']):
# dist_dict[str(i)] = np.argmin(dist_arr)
# data['dist_dict'] = dist_dict
# del data['dist_mat']
# save( data, data_file )
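
# A vectorized alternative sketch for the pairwise distances above (same result
# via numpy broadcasting; memory grows as num_test x num_train x dim):
# diff = data['test']['X'][:, None, :] - data['X'][None, :, :]
# data['dist_mat'] = LA.norm(diff, axis=2)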
def main():
path = '../Real_Data_Results/'
file_name = 'stare5'
generate_human_error( path , [file_name])
print 'done'
compute_dist_dict( path + 'data/' + file_name + '_pca50')
return
if __name__=="__main__":
main()
| [
"[email protected]"
] | |
a49e2981e3bf1e5622c75bb54165b0f55cecfe87 | 31c94ea00f0f6673f161a21a529f2272e7460a34 | /bindapi/routerApi.py | 4ea16209cd1e7101e783bcfeaa0d6ef1853e83b0 | [] | no_license | xgocn/bindapi | 0d3e51f696a9d3ec5dde4e05d1c2d5eb2fe52f5a | 343f07176de43c3e5ffc9b26c479c47c289fdc0e | refs/heads/master | 2023-03-15T08:01:22.550448 | 2018-05-09T06:19:22 | 2018-05-09T06:19:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 595 | py | # -*- coding: utf-8 -*-
# author: kiven
from rest_framework.routers import DefaultRouter
router = DefaultRouter()
from bind.views import DomainViewSet, RecordViewSet, AllDomainViewSet, XfrAclViewSet
router.register(r'domains', DomainViewSet)
router.register(r'records', RecordViewSet)
router.register(r'xfracls', XfrAclViewSet)
router.register(r'alldomains', AllDomainViewSet, base_name='alldomains')
from analyze.views import DomainNodeViewSet, DomainStatusViewSet
router.register(r'domainnodes', DomainNodeViewSet)
router.register(r'domainstatus', DomainStatusViewSet)
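
# DefaultRouter also exposes a browsable API root view. Typical wiring in a
# urls.py (sketch — module path and prefix are assumptions, not project facts):
# from django.conf.urls import include, url
# urlpatterns = [url(r'^api/', include(router.urls))]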
| [
"[email protected]"
] | |
17b6a63f5fd62343b7a3cb8a859ed7ef5cd184f7 | 521648e4e12366760da7baff15d35201e0b19a5e | /django_ansible/shell.py | 5dc1d4613af2d30156968e00bd67fc7c89c10b8e | [] | no_license | sashgorokhov/django-ansible | b54f596f7d50d239474eb2d4fd8e85c0da21f959 | ad32255b7c87bcada1bd6c8aa250c1ec52c8cd49 | refs/heads/master | 2021-01-22T07:52:55.457727 | 2017-02-13T19:46:23 | 2017-02-13T19:46:23 | 81,864,171 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 919 | py | import subprocess
import logging
logger = logging.getLogger(__name__)
def _try_decode(b):
try:
return b.decode()
    except Exception:
return b
def run(executable, args, env=None, cwd=None, **kwargs):
"""
:param kwargs: Additional arguments passed to subprocess.run function
:rtype: subprocess.CompletedProcess
"""
completed = subprocess.run(
args=args,
executable=executable,
env=env,
cwd=cwd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**kwargs
)
logger.info('$ %s %s (env: %s)', executable, str(args), str(env))
if completed.returncode != 0:
logger.warning('Exited with code %s', completed.returncode)
if completed.stderr:
logger.warning(_try_decode(completed.stderr))
if completed.stdout:
logger.debug(_try_decode(completed.stdout))
return completed
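
# A minimal usage sketch (hypothetical command): with shell=True, `args` is the
# command string and `executable` selects the shell that runs it.
# completed = run('/bin/bash', 'echo hello', env={'LANG': 'C'})
# if completed.returncode == 0:
#     print(_try_decode(completed.stdout))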
| [
"[email protected]"
] | |
c31338954ced6e76da2274aa4b6340be2e11225e | aa8e6259f33bdcfdf21434da5185f31aa6927195 | /gtf_admm_gird_v1.py | ded9bf78405dafb35b2100bc464273be8ad8622f | [] | no_license | Ryanshuai/graph_trend_filtering_py | b32448cfdc4c50a9dfde144abe73e878891f26de | 243969bf7dd97e483693ac88e45ab2192cd4edbf | refs/heads/master | 2020-11-27T07:13:22.116640 | 2019-12-24T20:56:05 | 2019-12-24T20:56:05 | 229,349,019 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,927 | py | import numpy as np
from numpy.linalg import norm
from grid_system import grid_system_2d, grid_system_3d
from get_Delta_grid import get_Delta_grid
from soft_thresh import soft_thresh
from construct_O import construct_O
def gtf_admm_grid_v1(y: np.array, k, lamb, rho, max_iter=1000):
y_size = y.size
y_shape = y.shape
y_dim = y.ndim
if y_dim == 2:
D = get_Delta_grid(y_shape, 'gtf2d', 0)
elif y_dim == 3:
D = get_Delta_grid(y_shape, 'gtf3d', 0)
else:
raise AssertionError('Grids with dimension > 3 not supported')
O = construct_O(D, k)
if k % 2 == 0:
O = O[:O.shape[1], :]
y = y.reshape((y_size, 1), order='F')
x = y.copy()
z = np.zeros_like(y, dtype=np.float64)
u = z.copy()
for i in range(max_iter):
if y_dim == 2:
b = (O.T @ (rho * z - u) + y).reshape(y_shape, order='F')
x = grid_system_2d(b, k + 1, rho)
elif y_dim == 3:
b = (O.T @ (rho * z - u) + y).reshape(y_shape, order='F')
x = grid_system_3d(b, k + 1, rho)
x = x.reshape((y_size, 1), order='F')
Ox = O @ x
z_new = soft_thresh(Ox + u / rho, lamb / rho)
s = rho * norm(O.T @ (z_new - z))
z = z_new
u += rho * (Ox - z)
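        # ADMM stopping test: r below is the primal residual ||Ox - z|| and s
        # (computed above) is the dual residual; the eps_pri/eps_dual tolerances
        # follow the standard criteria of Boyd et al., "Distributed Optimization
        # and Statistical Learning via ADMM" (Sec. 3.3).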
r = norm(Ox - z)
tol_abs = 1e-5
tol_rel = 1e-4
eps_pri = np.sqrt(y.size) * tol_abs + tol_rel * max(norm(Ox), norm(z))
eps_dual = np.sqrt(y.size) * tol_abs + tol_rel * norm(O.T @ u)
if r < eps_pri and s < eps_dual:
print('converged.')
break
if i % 1 == 0:
print('{} [r, s]={}, {}, [eps_pri, eps_dual]={},{}'.format(i, r, s, eps_pri, eps_dual))
        # adaptive penalty update: grow rho when the primal residual dominates,
        # shrink it when the dual residual dominates
        tau = 2
        if r > 10 * s:
            rho *= tau
        elif s > 10 * r:
            rho /= tau
else: # no break
print('Reached maxiter.')
return x.reshape(y_shape, order='F')
| [
"[email protected]"
] | |
28dccbb9398db07f45d327d9f7177a7907e88734 | 974d04d2ea27b1bba1c01015a98112d2afb78fe5 | /python/paddle/distributed/launch/controllers/ipu_controller.py | bf2c5f34b3bdf946f0f16b99225771bd139022e5 | [
"Apache-2.0"
] | permissive | PaddlePaddle/Paddle | b3d2583119082c8e4b74331dacc4d39ed4d7cff0 | 22a11a60e0e3d10a3cf610077a3d9942a6f964cb | refs/heads/develop | 2023-08-17T21:27:30.568889 | 2023-08-17T12:38:22 | 2023-08-17T12:38:22 | 65,711,522 | 20,414 | 5,891 | Apache-2.0 | 2023-09-14T19:20:51 | 2016-08-15T06:59:08 | C++ | UTF-8 | Python | false | false | 6,679 | py | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from paddle.distributed.launch.job.container import Container
from .collective import CollectiveController, ControleMode
class IPUController(CollectiveController):
@classmethod
def enable(cls, ctx):
if ctx.args.training_script == "ipu":
ctx.logger.debug(f"{cls.__name__} enabled")
ctx.args.run_mode = ControleMode.IPU
return True
else:
return False
def parse_ipu_args(self, args_list):
parser = argparse.ArgumentParser()
parser.add_argument(
"--hosts", type=str, help="The hosts for IPU distributd training."
)
parser.add_argument(
"--nproc_per_host",
type=int,
help="The number of processes launched per host.",
)
parser.add_argument(
"--ipus_per_replica",
type=int,
help="The number of IPUs requested per replica.",
)
parser.add_argument(
"--ipu_partition",
type=str,
help="The partition name of IPU devices.",
)
parser.add_argument(
"--vipu_server", type=str, help="The ip of the IPU device manager."
)
parser.add_argument(
"training_script",
type=str,
help="The full path to the IPU distributed training program/script to be launched in parallel. e.g., ``training.py``.",
)
parser.add_argument('training_script_args', nargs=argparse.REMAINDER)
return parser.parse_args(args_list)
def replace_training_script(self):
# IPU distributed computing is based on PopRun which is a wrapper of MPI.
self.ctx.args.training_script = "poprun"
poprun_args = self.parse_ipu_args(self.ctx.args.training_script_args)
num_ipus = int(self.ctx.args.devices)
# The number of replicas for data parallel
assert (
num_ipus % poprun_args.ipus_per_replica
) == 0, "The number of IPUs:{} mod the number of IPUs per replica:{} must == 0".format(
num_ipus, poprun_args.ipus_per_replica
)
num_replicas = num_ipus // poprun_args.ipus_per_replica
self.ctx.logger.info(f"The number of total replicas is {num_replicas}.")
# The number of processes
num_nodes = len(poprun_args.hosts.split(','))
num_procs = num_nodes * poprun_args.nproc_per_host
self.ctx.logger.info(f"The number of total processes is {num_procs}.")
assert (
num_replicas % num_procs
) == 0, "The number of replicas:{} mod the number of processes:{} must == 0".format(
num_replicas, num_procs
)
# hosts and endpoints
hosts = poprun_args.hosts.replace(' ', '').split(',')
endpoints = [x + ":8090" for x in hosts]
# args for poprun
poprun_command = []
poprun_command.append(f'--num-instances={num_procs}')
poprun_command.append(f'--num-replicas={num_replicas}')
poprun_command.append(
f'--ipus-per-replica={poprun_args.ipus_per_replica}'
)
poprun_command.append('--host={}'.format(','.join(hosts)))
poprun_command.append(f'--vipu-partition={poprun_args.ipu_partition}')
poprun_command.append(f'--vipu-server-host={poprun_args.vipu_server}')
poprun_command.extend(
[
'--update-partition=no',
'--vipu-server-timeout=120',
'--print-topology=yes',
'--numa-aware=yes',
]
)
# global envs
global_envs = '--mpi-local-args=\''
log_level = os.getenv('POPART_LOG_LEVEL', None)
if log_level:
global_envs += f'-x POPART_LOG_LEVEL={log_level} '
global_envs += (
'-x PADDLE_TRAINERS_NUM={} -x PADDLE_TRAINER_ENDPOINTS={}'.format(
num_procs, ','.join(endpoints)
)
)
global_envs += '\''
poprun_command.append(global_envs)
# local envs
for idx in range(num_procs):
cur_endpoint = endpoints[idx // poprun_args.nproc_per_host]
rank_in_node = idx % poprun_args.nproc_per_host
poprun_command.append(
'--instance-mpi-local-args={}:\"-x PADDLE_TRAINER_ID={} -x PADDLE_CURRENT_ENDPOINT={} -x PADDLE_RANK_IN_NODE={}\"'.format(
idx, idx, cur_endpoint, rank_in_node
)
)
# executor
poprun_command.append(sys.executable)
# script and script args
poprun_command.append(poprun_args.training_script)
poprun_command.extend(poprun_args.training_script_args)
# for debug
print("----------- PopRun Command -----------")
print("poprun \\")
        for arg in poprun_command[:-1]:
            print("%s \\" % arg)
        print(poprun_command[-1])
print("---------------------------------------")
# replace training_script_args
self.ctx.args.training_script_args = poprun_command
def _get_entrypoint(self):
entrypoint = [self.ctx.args.training_script]
entrypoint.extend(self.ctx.args.training_script_args)
entrypoint = [" ".join(entrypoint)]
return entrypoint
def new_container(
self, entrypoint=None, envs={}, use_ctx_env=True, out=None, err=None
):
c = Container(
entrypoint=(entrypoint or self._get_entrypoint()),
env=(self.ctx.get_envs() if use_ctx_env else {}),
)
c.outfile, c.errfile = self._get_out_err_file(out, err)
c.update_env(envs)
# Need subprocess.Popen(shell=True) for PopRun command
c.shell = True
return c
def run(self):
# Replace the training script with the PopRun command
self.replace_training_script()
self.build_job()
self.build_pod()
self.deploy_pod()
self.watch()
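
# Typical invocation (hypothetical hosts and partition names; "ipu" is the
# literal subcommand string that enable() matches against):
#   python -m paddle.distributed.launch --devices 4 ipu \
#       --hosts host1,host2 --nproc_per_host 1 --ipus_per_replica 2 \
#       --ipu_partition pod16 --vipu_server 10.1.2.3 train.py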
| [
"[email protected]"
] | |
e2f068cbe30c6a0e2ced7c56d424860a07838c8b | d0cd3064e1b24542e02518961fd6643af56738f0 | /ticha-django-site/handwritten_texts/views.py | 96faec09e23c5252e504753c7de6a6f472a2b205 | [] | no_license | zhanpengwang888/Docker-Test | 126bd74301b7550de753eb7539795e20ace285c5 | 2f0eefb684622d6eead3977697e8ccf4761ba1be | refs/heads/master | 2022-12-15T11:49:46.899365 | 2017-09-29T21:02:05 | 2017-09-29T21:02:05 | 104,515,096 | 0 | 0 | null | 2022-12-07T23:56:40 | 2017-09-22T20:16:17 | HTML | UTF-8 | Python | false | false | 1,899 | py | from django.shortcuts import render
from .models import HandwrittenText
from django.views.generic import ListView
class HandwrittenListView(ListView):
model = HandwrittenText
template_name = 'handwritten_texts/list.html'
EN_TO_ES = {
'title': 'título', "language": "idioma", "document_type": "tipo_del_documento",
"material_type": "material_type", "archive": "archivo", "collection": "colección",
"call_number": "número_de_etiqueta", "page": "páginas", "date_digitized": "date_digitized",
"year": "year", "town_modern_official": "pueblo", "primary_parties": "personajes_principales",
"slug": "slug", "town_short": "town_short", "date": "fecha", "has_translation": "has_translation",
"transcription": "transcription", "scribe": "escribano", "is_translation": "is_translation",
"witnesses": "testigos", "acknowledgements": "agradecimientos",
"permission_file": "permission_file", "percent_needs_review": "percent_needs_review",
"requester_project": "requester_project", "timeline_text": "timeline_spanish_text",
"timeline_headline": "timeline_spanish_headline"
}
def handwritten_text_detail_view(request, slug):
"""Custom view to supply the HandwrittenText detail template with its
fields in the proper language.
"""
man = HandwrittenText.objects.get(slug=slug)
translated_man = {}
for en_key, es_key in EN_TO_ES.items():
if request.LANGUAGE_CODE == 'es':
try:
translated_man[en_key] = getattr(man, es_key)
except AttributeError:
translated_man[en_key] = getattr(man, en_key)
else:
translated_man[en_key] = getattr(man, en_key)
context = {'man': translated_man, 'omeka_id': man.omeka_id}
return render(request, 'handwritten_texts/detail.html', context)
def redirect_view(request):
return render(request, 'handwritten_texts/redirect.html')
| [
"[email protected]"
] | |
c13e18406862b21208f3d0eabecafed60a527fec | 0b1002296d0686daae2ec0e83cafbba9833bf837 | /utils/test.py | 20fbd142451833f7a9d51ceac556d2b719c2efae | [] | no_license | williamzxl/tmp1 | b35f967ee62b25c65976b5c0a59cd71b315c2029 | 0213bac933435ae15dfc10997aa6b07300e2f580 | refs/heads/master | 2021-06-10T01:37:35.060796 | 2018-12-21T06:38:14 | 2018-12-21T06:38:14 | 162,676,016 | 1 | 0 | null | 2021-06-01T23:13:05 | 2018-12-21T06:37:07 | Python | UTF-8 | Python | false | false | 122 | py | a = {'groupID': '2475', 'newF': 3, 'sysID': '11-Hubei-Reading-4', 'taskID': '37037'}
print(a.keys())
print(list(a.keys())) | [
"[email protected]"
] | |
0a4496a02194dde02d89f94e065bb5e8d08c414d | b77cc1448ae2c68589c5ee24e1a0b1e53499e606 | /leave/migrations/0011_auto_20170216_1404.py | 583d88c706e6ecb11c94de53c4d77eac4b435c0f | [] | no_license | PregTech-c/Hrp_system | a5514cf6b4c778bf7cc58e8a6e8120ac7048a0a7 | 11d8dd3221497c536dd7df9028b9991632055b21 | refs/heads/master | 2022-10-09T07:54:49.538270 | 2018-08-21T11:12:04 | 2018-08-21T11:12:04 | 145,424,954 | 1 | 1 | null | 2022-10-01T09:48:53 | 2018-08-20T13:58:31 | JavaScript | UTF-8 | Python | false | false | 421 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-02-16 11:04
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('leave', '0010_auto_20170215_1712'),
]
operations = [
migrations.RenameModel(
old_name='LeaveRequestMessage',
new_name='LeaveRequestApplication',
),
]
| [
"[email protected]"
] | |
7b88efcd16638abbf9a7b7bf5405d75f01442fa0 | 4ef12965654c4bc9d6a0635ecf8188ecf2defed8 | /my_new_application_1046/wsgi.py | 3cb6a828d03839b76f908d22de5a1756e8c484a1 | [] | no_license | crowdbotics-apps/my-new-application-1046 | 2d30662ae59e1675d0fee78789898852a1b81b45 | e3ee98e6318bac0a61b3519d148b4b6b80c9c6f5 | refs/heads/master | 2022-12-21T22:11:11.515569 | 2019-02-23T00:42:41 | 2019-02-23T00:42:41 | 172,153,638 | 0 | 0 | null | 2022-12-08T01:40:51 | 2019-02-23T00:41:51 | Python | UTF-8 | Python | false | false | 424 | py | """
WSGI config for my_new_application_1046 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "my_new_application_1046.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
109a21b79abddfbc7f76c75cdba728b887a8d0ef | 9343c9a43e1940b6ca7e96d0891edef385389083 | /labs/lab3/train_hvd_1.py | 197c1a2711684a9cc98dccd1718f112ce9c71686 | [
"MIT"
] | permissive | luyuliu/CSE-5194 | 4441301d283820e45d64b3503061221082cf334b | 52970106c21b30e64d4cf1df26bec09929494060 | refs/heads/master | 2020-07-09T17:19:47.994233 | 2019-12-26T15:13:56 | 2019-12-26T15:13:56 | 204,032,088 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,253 | py | from __future__ import print_function
import argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import torch.utils.data.distributed
import horovod.torch as hvd
import collections
import random, time, os
import torch
from utils import read_words, create_batches, to_var
from gated_cnn import GatedCNN
import torch.nn.functional as F
from torch.utils.data import DistributedSampler, DataLoader
from torch.nn.parallel import DistributedDataParallelCPU, DistributedDataParallel, DataParallel
import torch.multiprocessing as mp
import torch.distributed as dist
from model_2 import SomeNet
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=80, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
help='input batch size for testing (default: 1000)')
parser.add_argument('--epochs', type=int, default=1, metavar='N',
help='number of epochs to train (default: 1)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                    help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=42, metavar='S',
help='random seed (default: 42)')
parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--fp16-allreduce', action='store_true', default=False,
help='use fp16 compression during allreduce')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
vocab_size = 2000
seq_len = 21
embd_size = 200
n_layers = 10
kernel = (5, embd_size)
out_chs = 64
res_block_count = 5
batch_size = args.batch_size
rank = 0
world_size = 2
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if args.cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
words = read_words('/users/PAS1588/liuluyu0378/lab1/1-billion-word-language-modeling-benchmark-r13output/training-monolingual.tokenized.shuffled', seq_len, kernel[0])
word_counter = collections.Counter(words).most_common(vocab_size-1)
vocab = [w for w, _ in word_counter]
w2i = dict((w, i) for i, w in enumerate(vocab, 1))
w2i['<unk>'] = 0
print('vocab_size', vocab_size)
print('w2i size', len(w2i))
data = [w2i[w] if w in w2i else 0 for w in words]
data = create_batches(data, batch_size, seq_len)
split_idx = int(len(data) * 0.8)
training_data = data[:split_idx]
test_data = data[split_idx:]
rank = hvd.rank()
training_length = len(training_data)
test_length = len(test_data)
training_data = training_data[int(rank * training_length / hvd.size()): int((rank + 1)* training_length / hvd.size())]
test_data = test_data[int(rank * test_length / hvd.size()): int((rank + 1)* test_length / hvd.size())]
print('train samples:', len(training_data))
print('test samples:', len(test_data))
train_dataset = training_data
# Horovod: use DistributedSampler to partition the training data.
# Note: training_data/test_data were already sharded by rank above and are
# pre-batched by create_batches; the loops below iterate those lists directly,
# so these sampler/loader objects are effectively unused.
train_sampler = torch.utils.data.distributed.DistributedSampler(
train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=80, sampler=train_sampler, **kwargs)
test_dataset = test_data
# Horovod: use DistributedSampler to partition the test data.
test_sampler = torch.utils.data.distributed.DistributedSampler(
test_dataset, num_replicas=hvd.size(), rank=hvd.rank())
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size,
sampler=test_sampler, **kwargs)
model = GatedCNN(seq_len, vocab_size, embd_size, n_layers, kernel, out_chs, res_block_count, vocab_size)
if args.cuda:
# Move model to GPU.
model.cuda()
# Horovod: scale learning rate by the number of GPUs.
optimizer = optim.Adam(model.parameters(), lr=args.lr * hvd.size())
total_comm_time = time.time()
# Horovod: broadcast parameters & optimizer state.
adf = time.time()
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
bdf = time.time()
# Horovod: (optional) compression algorithm.
compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(optimizer,
named_parameters=model.named_parameters(),
compression=compression)
total_comm_time = time.time() - total_comm_time
total_training_comm_time = 0
def train(epoch):
start = 0
end = 0
epoch_comm_time = 0
model.train()
# Horovod: set epoch to sampler for shuffling.
train_sampler.set_epoch(epoch)
for batch_idx, (data, target) in enumerate(train_dataset):
a = time.time()
# for i in range(len(data)):
# print(len(data[0][0]))
# data[i] = to_var(torch.stack(data[i]))
# data = torch.stack(data)
# target = torch.stack(target)
data = to_var(torch.LongTensor(data)) # (bs, seq_len)
target = to_var(torch.LongTensor(target)) # (bs,)
if args.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = F.cross_entropy(output, target)
start = time.time()
optimizer.zero_grad()
end = time.time()
epoch_comm_time = end - start + epoch_comm_time
loss.backward()
start = time.time()
optimizer.step()
end = time.time()
epoch_comm_time = end - start + epoch_comm_time
b = time.time()
if batch_idx % args.log_interval == 0:
# Horovod: use train_sampler to determine the number of examples in
# this worker's partition.
if hvd.rank() == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_dataset),
100. * batch_idx / len(train_dataset), loss.item()))
print("Train time: ", b -a)
return epoch_comm_time
def metric_average(val, name):
tensor = torch.tensor(val)
avg_tensor = hvd.allreduce(tensor, name=name)
return avg_tensor.item()
def test():
model.eval()
test_loss = 0.
test_accuracy = 0.
counter = 0
correct = 0
for data, target in test_dataset:
data = to_var(torch.LongTensor(data)) # (bs, seq_len)
target = to_var(torch.LongTensor(target)) # (bs,)
if args.cuda:
data, target = data.cuda(), target.cuda()
output = model(data)
# sum up batch loss
test_loss += F.nll_loss(output, target).item()
_, pred_ids = torch.max(output, 1)
# get the index of the max log-probability
pred = output.data.max(1, keepdim=True)[1]
correct += torch.sum(pred_ids == target).data.item()
counter += data.size(0)
test_accuracy += pred.eq(target.data.view_as(pred)).cpu().float().sum()
# print('Test Acc: {:.2f} % ({}/{})'.format(100 * correct / counter, correct, counter))
# print('Test Loss: {:.4f}'.format(losses/counter))
# # Horovod: use test_sampler to determine the number of examples in
# # this worker's partition.
test_loss /= counter
test_accuracy /= counter
# Horovod: average metric values across workers.
test_loss = metric_average(test_loss, 'avg_loss')
test_accuracy = metric_average(test_accuracy, 'avg_accuracy')
# Horovod: print output only on first rank.
if hvd.rank() == 0:
print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(
test_loss, 100. * test_accuracy))
aa = time.time()
for epoch in range(1, args.epochs + 1):
total_training_comm_time += train(epoch)
bb = time.time()
total_training_time = bb - aa
training_minus_epoch_comm_time = total_training_time - total_training_comm_time
total_comm_time += total_training_comm_time
print("************* Total train time: ", total_training_time, "***************")
print("************* Total training minus comm time: ", training_minus_epoch_comm_time, "***************")
print("************* Total comm time: ", total_comm_time, "***************")
print("************* Broadcast time: ", bdf - adf, "***************")
print("************* allinall time: ", total_training_comm_time, "***************")
test()
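
# Typical launch for this script (assuming a working Horovod install):
#   horovodrun -np 2 python train_hvd_1.py --epochs 1 --batch-size 80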
| [
"[email protected]"
] | |
cf5d0a7771e323b24bd3ee042f85b1bcbe5f004f | 2233f520493f64c6070dd3e77722e53a7dd738e8 | /day5/my_devices_na.py | 2721f252d62d9ee537165a0fe29a355f3aca4d31 | [
"Apache-2.0"
] | permissive | mpjeffin/pynet-ons-oct17 | 690bb31600b8ef5131439bb25ddce35b4855ba6a | d0daf9c250f79bc34b3b8b06b67004f56ef834a2 | refs/heads/master | 2021-09-07T00:00:02.234456 | 2018-02-13T19:58:11 | 2018-02-13T19:58:11 | 125,467,721 | 1 | 0 | null | 2018-03-16T05:26:10 | 2018-03-16T05:26:10 | null | UTF-8 | Python | false | false | 1,517 | py | """
pynet-rtr1 (Cisco IOS) 184.105.247.70
pynet-rtr2 (Cisco IOS) 184.105.247.71
pynet-sw1 (Arista EOS) 184.105.247.72
pynet-sw2 (Arista EOS) 184.105.247.73
pynet-sw3 (Arista EOS) 184.105.247.74
pynet-sw4 (Arista EOS) 184.105.247.75
juniper-srx 184.105.247.76
"""
from getpass import getpass
password = getpass("Enter standard password: ")
cisco_rtr1 = dict(
hostname='184.105.247.70',
device_type='ios',
username='pyclass',
password=password,
optional_args = {}
)
cisco_rtr2 = dict(
hostname='184.105.247.71',
device_type='ios',
username='pyclass',
password=password,
optional_args = {}
)
arista_sw1 = dict(
hostname='184.105.247.72',
device_type='eos',
username='pyclass',
password=password,
optional_args = {}
)
arista_sw2 = dict(
hostname='184.105.247.73',
device_type='eos',
username='pyclass',
password=password,
optional_args = {}
)
juniper_srx = dict(
hostname='184.105.247.76',
device_type='junos',
username='pyclass',
password=password,
optional_args = {}
)
juniper1 = dict(
hostname='juniper1.twb-tech.com',
device_type='junos',
username='pyclass',
password=password,
optional_args = {}
)
juniper2 = dict(
hostname='juniper2.twb-tech.com',
device_type='junos',
username='pyclass',
password=password,
optional_args = {}
)
device_list = [
cisco_rtr1,
cisco_rtr2,
arista_sw1,
arista_sw2,
juniper_srx,
]
| [
"[email protected]"
] | |
438dfdf0b19a083bd9b1157b53d2919d688c2a8a | 958b0471c52eff93415216cdd1a2b2ad3947a89b | /blueoil/templates/lmnet/object_detection.tpl.py | cf5170303d51745396007741b7ffa0160df7ae0d | [
"Apache-2.0"
] | permissive | fumihwh/blueoil | 4deb606e334b8456e7ace41e3f091ad6dc41afb6 | acb5a270f201f34fe5a5b27a4b395d9c3a838b27 | refs/heads/master | 2020-04-01T22:29:27.525697 | 2018-10-18T09:23:37 | 2018-10-18T09:23:37 | 153,711,347 | 1 | 0 | null | 2018-10-19T01:49:02 | 2018-10-19T01:49:02 | null | UTF-8 | Python | false | false | 4,402 | py | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from easydict import EasyDict
import tensorflow as tf
from lmnet.common import Tasks
from lmnet.networks.object_detection.{{network_module}} import {{network_class}}
from lmnet.datasets.{{dataset_module}} import {{dataset_class}}
from lmnet.data_processor import Sequence
from lmnet.pre_processor import (
ResizeWithGtBoxes,
DivideBy255,
)
from lmnet.post_processor import (
FormatYoloV2,
ExcludeLowScoreBox,
NMS,
)
from lmnet.data_augmentor import (
Brightness,
Color,
Contrast,
FlipLeftRight,
Hue,
SSDRandomCrop,
)
from lmnet.quantizations import (
binary_channel_wise_mean_scaling_quantizer,
linear_mid_tread_half_quantizer,
)
IS_DEBUG = False
NETWORK_CLASS = {{network_class}}
# TODO(wakisaka): should be hidden. generate dataset class on the fly.
DATASET_CLASS = type('DATASET_CLASS', ({{dataset_class}},), {{dataset_class_property}})
IMAGE_SIZE = {{image_size}}
BATCH_SIZE = {{batch_size}}
DATA_FORMAT = "NHWC"
TASK = Tasks.OBJECT_DETECTION
# In order to get instance property `classes`, instantiate DATASET_CLASS.
CLASSES = DATASET_CLASS(subset="train", batch_size=1).classes
MAX_EPOCHS = {{max_epochs}}
SAVE_STEPS = {{save_steps}}
TEST_STEPS = {{test_steps}}
SUMMARISE_STEPS = {{summarise_steps}}
# distributed training
IS_DISTRIBUTION = False
# pretrain
IS_PRETRAIN = False
PRETRAIN_VARS = []
PRETRAIN_DIR = ""
PRETRAIN_FILE = ""
PRE_PROCESSOR = Sequence([
ResizeWithGtBoxes(size=IMAGE_SIZE),
DivideBy255()
])
anchors = [
(1.3221, 1.73145), (3.19275, 4.00944), (5.05587, 8.09892), (9.47112, 4.84053), (11.2364, 10.0071)
]
score_threshold = 0.05
nms_iou_threshold = 0.5
nms_max_output_size = 100
POST_PROCESSOR = Sequence([
FormatYoloV2(
image_size=IMAGE_SIZE,
classes=CLASSES,
anchors=anchors,
data_format=DATA_FORMAT,
),
ExcludeLowScoreBox(threshold=score_threshold),
NMS(iou_threshold=nms_iou_threshold, max_output_size=nms_max_output_size, classes=CLASSES,),
])
NETWORK = EasyDict()
NETWORK.OPTIMIZER_CLASS = tf.train.MomentumOptimizer
NETWORK.OPTIMIZER_KWARGS = {"momentum": 0.9}
NETWORK.LEARNING_RATE_FUNC = tf.train.piecewise_constant
# In the original YOLOv2 paper, training starts with a learning rate of 1e-3
# and divides it by 10 at 60 and 90 epochs.
# Train data num per epoch is 16551
step_per_epoch = int(16551 / BATCH_SIZE)
NETWORK.LEARNING_RATE_KWARGS = {
"values": [5e-4, 2e-2, 5e-3, 5e-4],
"boundaries": [step_per_epoch, step_per_epoch * 80, step_per_epoch * 120],
}
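# Worked example of the schedule above: if batch_size rendered to 16, then
# step_per_epoch = int(16551 / 16) = 1034, so the learning rate switches
# 5e-4 -> 2e-2 -> 5e-3 -> 5e-4 at roughly epochs 1, 80 and 120.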
NETWORK.IMAGE_SIZE = IMAGE_SIZE
NETWORK.BATCH_SIZE = BATCH_SIZE
NETWORK.DATA_FORMAT = DATA_FORMAT
NETWORK.ANCHORS = anchors
NETWORK.OBJECT_SCALE = 5.0
NETWORK.NO_OBJECT_SCALE = 1.0
NETWORK.CLASS_SCALE = 1.0
NETWORK.COORDINATE_SCALE = 1.0
NETWORK.LOSS_IOU_THRESHOLD = 0.6
NETWORK.WEIGHT_DECAY_RATE = 0.0005
NETWORK.SCORE_THRESHOLD = score_threshold
NETWORK.NMS_IOU_THRESHOLD = nms_iou_threshold
NETWORK.NMS_MAX_OUTPUT_SIZE = nms_max_output_size
NETWORK.SEEN_THRESHOLD = 8000
# quantize
NETWORK.ACTIVATION_QUANTIZER = linear_mid_tread_half_quantizer
NETWORK.ACTIVATION_QUANTIZER_KWARGS = {
'bit': 2,
'max_value': 2.0
}
NETWORK.WEIGHT_QUANTIZER = binary_channel_wise_mean_scaling_quantizer
NETWORK.WEIGHT_QUANTIZER_KWARGS = {}
NETWORK.QUANTIZE_FIRST_CONVOLUTION = True
NETWORK.QUANTIZE_LAST_CONVOLUTION = False
# dataset
DATASET = EasyDict()
DATASET.BATCH_SIZE = BATCH_SIZE
DATASET.DATA_FORMAT = DATA_FORMAT
DATASET.PRE_PROCESSOR = PRE_PROCESSOR
DATASET.AUGMENTOR = Sequence([
FlipLeftRight(is_bounding_box=True),
Brightness((0.75, 1.25)),
Color((0.75, 1.25)),
Contrast((0.75, 1.25)),
Hue((-10, 10)),
SSDRandomCrop(min_crop_ratio=0.7),
])
| [
"[email protected]"
] | |
1c6b5467cecb01f1ce0b31daf841a086b27f5729 | 2305ce053d16652d31823bd07faf38553b4f9b63 | /books/CrackingCodesWithPython/Chapter07/PracticeQuestions/Question2.py | 8a2180532dbdb37385f07898cf49f7569908be5b | [
"MIT"
] | permissive | leihuagh/python-tutorials | cff3c5e250a152252d4b725bca19f55721483249 | 33831b983d7bd1491e367b6c7654e687d5ba709b | refs/heads/master | 2020-03-29T17:59:31.226400 | 2018-09-24T08:41:26 | 2018-09-24T08:41:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # Is each spam a global or local variable?
spam = 42 # global/local
def foo():
global spam
spam = 99 # global/local
print(spam)
foo() # mind == blown
| [
"[email protected]"
] | |
ffce63d94730102d0599826ce2ea7e70963a22c9 | 0c9ec5d4bafca45505f77cbd3961f4aff5c10238 | /openapi-python-client/openapi_client/models/process_instance_modification_dto.py | a2247513acf2154c0228b1c01bcedaba8beea8c1 | [
"Apache-2.0"
] | permissive | yanavasileva/camunda-bpm-examples | 98cd2930f5c8df11a56bf04845a8ada5b3bb542d | 051f8f28c62845e68ce4059ab64264c5a0bdc009 | refs/heads/master | 2022-10-19T20:07:21.278160 | 2020-05-27T15:28:27 | 2020-05-27T15:28:27 | 267,320,400 | 0 | 0 | Apache-2.0 | 2020-05-27T14:35:22 | 2020-05-27T13:00:01 | null | UTF-8 | Python | false | false | 7,337 | py | # coding: utf-8
"""
Camunda BPM REST API
OpenApi Spec for Camunda BPM REST API. # noqa: E501
The version of the OpenAPI document: 7.13.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from openapi_client.configuration import Configuration
class ProcessInstanceModificationDto(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'skip_custom_listeners': 'bool',
'skip_io_mappings': 'bool',
'instructions': 'list[ProcessInstanceModificationInstructionDto]',
'annotation': 'str'
}
attribute_map = {
'skip_custom_listeners': 'skipCustomListeners',
'skip_io_mappings': 'skipIoMappings',
'instructions': 'instructions',
'annotation': 'annotation'
}
def __init__(self, skip_custom_listeners=None, skip_io_mappings=None, instructions=None, annotation=None, local_vars_configuration=None): # noqa: E501
"""ProcessInstanceModificationDto - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._skip_custom_listeners = None
self._skip_io_mappings = None
self._instructions = None
self._annotation = None
self.discriminator = None
self.skip_custom_listeners = skip_custom_listeners
self.skip_io_mappings = skip_io_mappings
if instructions is not None:
self.instructions = instructions
if annotation is not None:
self.annotation = annotation
@property
def skip_custom_listeners(self):
"""Gets the skip_custom_listeners of this ProcessInstanceModificationDto. # noqa: E501
Skip execution listener invocation for activities that are started or ended as part of this request. # noqa: E501
:return: The skip_custom_listeners of this ProcessInstanceModificationDto. # noqa: E501
:rtype: bool
"""
return self._skip_custom_listeners
@skip_custom_listeners.setter
def skip_custom_listeners(self, skip_custom_listeners):
"""Sets the skip_custom_listeners of this ProcessInstanceModificationDto.
Skip execution listener invocation for activities that are started or ended as part of this request. # noqa: E501
:param skip_custom_listeners: The skip_custom_listeners of this ProcessInstanceModificationDto. # noqa: E501
:type: bool
"""
self._skip_custom_listeners = skip_custom_listeners
@property
def skip_io_mappings(self):
"""Gets the skip_io_mappings of this ProcessInstanceModificationDto. # noqa: E501
Skip execution of [input/output variable mappings](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/#input-output-variable-mapping) for activities that are started or ended as part of this request. # noqa: E501
:return: The skip_io_mappings of this ProcessInstanceModificationDto. # noqa: E501
:rtype: bool
"""
return self._skip_io_mappings
@skip_io_mappings.setter
def skip_io_mappings(self, skip_io_mappings):
"""Sets the skip_io_mappings of this ProcessInstanceModificationDto.
Skip execution of [input/output variable mappings](https://docs.camunda.org/manual/7.13/user-guide/process-engine/variables/#input-output-variable-mapping) for activities that are started or ended as part of this request. # noqa: E501
:param skip_io_mappings: The skip_io_mappings of this ProcessInstanceModificationDto. # noqa: E501
:type: bool
"""
self._skip_io_mappings = skip_io_mappings
@property
def instructions(self):
"""Gets the instructions of this ProcessInstanceModificationDto. # noqa: E501
JSON array of modification instructions. The instructions are executed in the order they are in. # noqa: E501
:return: The instructions of this ProcessInstanceModificationDto. # noqa: E501
:rtype: list[ProcessInstanceModificationInstructionDto]
"""
return self._instructions
@instructions.setter
def instructions(self, instructions):
"""Sets the instructions of this ProcessInstanceModificationDto.
JSON array of modification instructions. The instructions are executed in the order they are in. # noqa: E501
:param instructions: The instructions of this ProcessInstanceModificationDto. # noqa: E501
:type: list[ProcessInstanceModificationInstructionDto]
"""
self._instructions = instructions
@property
def annotation(self):
"""Gets the annotation of this ProcessInstanceModificationDto. # noqa: E501
An arbitrary text annotation set by a user for auditing reasons. # noqa: E501
:return: The annotation of this ProcessInstanceModificationDto. # noqa: E501
:rtype: str
"""
return self._annotation
@annotation.setter
def annotation(self, annotation):
"""Sets the annotation of this ProcessInstanceModificationDto.
An arbitrary text annotation set by a user for auditing reasons. # noqa: E501
:param annotation: The annotation of this ProcessInstanceModificationDto. # noqa: E501
:type: str
"""
self._annotation = annotation
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProcessInstanceModificationDto):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, ProcessInstanceModificationDto):
return True
return self.to_dict() != other.to_dict()
| [
"[email protected]"
] | |
c0eb4bc8412ecaaaa4487ae33bba16b07ed34654 | 0b5ab7349485da4ea40ca343bc50f4cab74c917c | /week06/c11_02.py | 6f2b70e99e3507ef95e9e833368280cb7b5ebef7 | [] | no_license | workherd/Python006-006 | 9bf2782ccda037de9af98eb7daa87fd1edeb3caf | 7aa176c3cf4effd015802b550edfb70f859e94d9 | refs/heads/main | 2023-04-29T14:37:43.545376 | 2021-05-16T04:13:08 | 2021-05-16T04:13:08 | 323,247,475 | 1 | 0 | null | 2020-12-21T06:13:42 | 2020-12-21T06:13:42 | null | UTF-8 | Python | false | false | 712 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2021/1/30 21:06
# @Author : john
# @File : c11.py
# Use a type-based metaclass to create classes
def pop_value(self, dict_value):
    # Remove the first key whose value equals dict_value.
    for key in self.keys():
        if self.__getitem__(key) == dict_value:
            self.pop(key)
            break
# Metaclass requirement: must inherit from type
class DelValue(type):
    # Metaclass requirement: must implement __new__
def __new__(cls, name, bases, attrs):
attrs['pop_value'] = pop_value
return type.__new__(cls, name, bases, attrs)
class DelDictValue(dict, metaclass=DelValue):
pass
d = DelDictValue()
d['a'] = 'A'
d['b'] = 'B'
d['c'] = 'C'
d.pop_value('C')
for k,v in d.items():
print(k,v)
| [
"[email protected]"
] | |
477f6768361db49f12efc9b40192ff00dd8077a5 | 1f2860bf84fa87e2d6c3b5e5b1a62e76879a642a | /q41_50/049.py | 5ff4e3fbd4c56fc5ad1ac2c8c1db93e208b34745 | [] | no_license | Vegctrp/pic100knock | 222dc4c981e7d20180a2338184109987f56d1518 | 134a41c9a3fcfc49667a26625cfeaf7bc4a91899 | refs/heads/master | 2020-07-08T17:17:43.869124 | 2019-09-23T15:23:08 | 2019-09-23T15:23:08 | 203,730,580 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 476 | py | import cv2
import numpy as np
import lib4150
import matplotlib.pyplot as plt
import sys,os
sys.path.append(os.getcwd())
from q01_10 import lib0110
if __name__ == '__main__':
img = cv2.imread("Gasyori100knock/Question_41_50/imori.jpg")
img2 = lib0110.OTSU_binarization(lib0110.BGR2GRAY(img))
out = lib4150.Opening_operation(img2, 1)
cv2.imshow("imori", out)
cv2.waitKey(0)
cv2.imwrite("q41_50/049.jpg", out)
cv2.destroyAllWindows() | [
"[email protected]"
] | |
2b381525fefdeac0ef1e5cd6af040849d7ab9e4e | 99b0631baa2fd9ab2455d848b47febf581916272 | /zhijieketang/chapter11/ch11.3.6.py | 5ed6d9e5c1c618b89767775b7f3df937094f87bb | [] | no_license | seceast/PyProjects | a934e366cb619f2610d75b9a0fb47d818814a4de | 7be7193b4126ce920a3d3ffa4ef5d8743b3fa7d1 | refs/heads/master | 2023-03-07T22:23:21.229489 | 2021-02-25T05:37:58 | 2021-02-25T05:37:58 | 265,480,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 641 | py | # coding=utf-8
# Source file: chapter11/ch11.3.6.py
class Animal(object):
"""定义动物类"""
def __init__(self, age, sex=1, weight=0.0):
        self.age = age  # age instance variable
        self.sex = sex  # sex instance variable
        self.weight = weight  # weight instance variable
def eat(self):
self.weight += 0.05
print('eat...')
def run(self):
self.weight -= 0.01
print('run...')
a1 = Animal(2, 0, 10.0)
print('a1 weight: {0:0.2f}'.format(a1.weight))
a1.eat()
print('a1 weight: {0:0.2f}'.format(a1.weight))
a1.run()
print('a1 weight: {0:0.2f}'.format(a1.weight))
| [
"[email protected]"
] | |
30d160a89cb021b89ffa3a81257339d90f223c24 | b44df2be270793884ca5bd5e79d22c991edae001 | /app/auth/views.py | 9ac180988110a27bb0106dfd45b959fc130d2335 | [] | no_license | Wakarende/Blog | d45c423be8a86227ad4106bbdae429a79ef3dcf2 | 2e2d2f89c55c4f5dbe0cf6fe09f83212bf905f5c | refs/heads/master | 2023-04-23T00:18:47.343723 | 2021-05-05T06:46:51 | 2021-05-05T06:46:51 | 363,127,105 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py | from flask import render_template,redirect,url_for,flash,request
from . import auth
from flask_login import login_required,login_user,logout_user
from ..models import User
from .forms import RegistrationForm,LoginForm
from .. import db
from ..email import mail_message
@auth.route('/login', methods=['GET','POST'])
def login():
login_form = LoginForm()
if login_form.validate_on_submit():
user = User.query.filter_by(email = login_form.email.data).first()
if user is not None and user.verify_password(login_form.password.data):
login_user(user,login_form.remember.data)
return redirect(request.args.get('next') or url_for('main.index'))
        flash('Invalid username or password')
title = "Login"
    return render_template('auth/login.html', login_form=login_form, title=title)
@auth.route('/register',methods = ["GET","POST"])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email = form.email.data, username = form.username.data,password = form.password.data)
db.session.add(user)
db.session.commit()
mail_message("Welcome to My J Word","email/welcome_user",user.email,user=user)
return redirect(url_for('auth.login'))
title = "New Account"
    return render_template('auth/register.html', registration_form=form, title=title)
@auth.route('/logout')
@login_required
def logout():
logout_user()
return redirect(url_for("main.index"))
| [
"[email protected]"
] | |
96ea774d03c563f4d521a7284d56912681fc110b | 10d98fecb882d4c84595364f715f4e8b8309a66f | /genomics_ood/images_ood/eval.py | af9737b30f6b5e2487d99c451162174c16c47aad | [
"CC-BY-4.0",
"Apache-2.0"
] | permissive | afcarl/google-research | 51c7b70d176c0d70a5ee31ea1d87590f3d6c6f42 | 320a49f768cea27200044c0d12f394aa6c795feb | refs/heads/master | 2021-12-02T18:36:03.760434 | 2021-09-30T20:59:01 | 2021-09-30T21:07:02 | 156,725,548 | 1 | 0 | Apache-2.0 | 2018-11-08T15:13:53 | 2018-11-08T15:13:52 | null | UTF-8 | Python | false | false | 11,626 | py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Evaluating Likelihood Ratios based on pixel_cnn model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import tensorflow.compat.v1 as tf
from genomics_ood.images_ood import pixel_cnn
from genomics_ood.images_ood import utils
tf.compat.v1.disable_v2_behavior()
flags.DEFINE_string('model_dir', '/tmp/expfashion/rescaleFalse/',
'Directory to write results and logs.')
flags.DEFINE_string('data_dir', '/tmp/image_data',
'Directory to data np arrays.')
flags.DEFINE_integer('ckpt_step', 10, 'The step of the selected ckpt.')
flags.DEFINE_string('exp', 'fashion', 'cifar or fashion')
flags.DEFINE_integer(
'repeat_id', -1,
('We run 10 independent experiments to get the mean and variance of AUROC.',
'repeat_id=i indicates the i-th independent run.',
     'repeat_id=-1 indicates only one independent run.'))
FLAGS = flags.FLAGS
REG_WEIGHT_LIST = [0, 10, 100]
MUTATION_RATE_LIST = [0.1, 0.2, 0.3]
def load_datasets(exp, data_dir):
if exp == 'fashion':
datasets = utils.load_fmnist_datasets(data_dir)
else:
datasets = utils.load_cifar_datasets(data_dir)
return datasets
def find_ckpt_match_param(reg_weight, mutation_rate, repeat_id, ckpt_step):
"""Find model ckpt that is trained based on mutation_rate and reg_weight."""
param_dir = 'reg%.2f_mr%.2f' % (reg_weight, mutation_rate)
ckpt_dir = os.path.join(FLAGS.model_dir, param_dir)
if repeat_id == -1:
ckpt_repeat_dir = os.path.join(ckpt_dir, 'model')
else:
# each param_dir may have multiple independent runs
try:
repeat_dir_list = tf.compat.v1.gfile.ListDirectory(ckpt_dir)
except tf.errors.NotFoundError:
return None
repeat_dir = repeat_dir_list[repeat_id]
ckpt_repeat_dir = os.path.join(ckpt_dir, repeat_dir, 'model')
ckpt_file = utils.get_ckpt_at_step(ckpt_repeat_dir, ckpt_step)
# print('ckpt_file={}'.format(ckpt_file))
return ckpt_file
def create_model_and_restore_ckpt(ckpt_file):
"""Restore model from ckpt."""
# load params
params_json_file = os.path.join(os.path.dirname(ckpt_file), 'params.json')
params = utils.load_hparams(params_json_file)
# Define a Pixel CNN network
input_shape = (params['n_dim'], params['n_dim'], params['n_channel'])
dist = pixel_cnn.PixelCNN(
image_shape=input_shape,
dropout_p=params['dropout_p'],
reg_weight=params['reg_weight'],
num_resnet=params['num_resnet'],
num_hierarchies=params['num_hierarchies'],
num_filters=params['num_filters'],
num_logistic_mix=params['num_logistic_mix'],
use_weight_norm=params['use_weight_norm'],
rescale_pixel_value=params['rescale_pixel_value'],
)
saver = tf.compat.v1.train.Saver(max_to_keep=50000)
init_op = tf.compat.v1.global_variables_initializer()
# restore ckpt
sess = tf.compat.v1.Session()
tf.compat.v1.keras.backend.set_session(sess)
sess.run(init_op)
saver.restore(sess, ckpt_file)
return dist, params, sess
def load_data_and_model_and_pred(exp,
data_dir,
reg_weight,
mutation_rate,
repeat_id,
ckpt_step,
eval_mode,
return_per_pixel=False):
"""Load datasets, load model ckpt, and eval the model on the datasets."""
tf.compat.v1.reset_default_graph()
# load datasets
datasets = load_datasets(exp, data_dir)
# load model
ckpt_file = find_ckpt_match_param(reg_weight, mutation_rate, repeat_id,
ckpt_step)
if not ckpt_file: # no ckpt file is found
return None, None
dist, params, sess = create_model_and_restore_ckpt(ckpt_file)
# Evaluations
preds_in = utils.eval_on_data(
datasets['%s_in' % eval_mode],
utils.image_preprocess,
params,
dist,
sess,
return_per_pixel=return_per_pixel)
if eval_mode == 'val':
if exp == 'fashion':
preds_ood = utils.eval_on_data(
datasets['val_ood'],
utils.image_preprocess,
params,
dist,
sess,
return_per_pixel=return_per_pixel)
else:
preds_ood = utils.eval_on_data(
datasets['val_in'],
utils.image_preprocess_grey,
params,
dist,
sess,
return_per_pixel=return_per_pixel)
elif eval_mode == 'test':
preds_ood = utils.eval_on_data(
datasets['test_ood'],
utils.image_preprocess,
params,
dist,
sess,
return_per_pixel=return_per_pixel)
return preds_in, preds_ood
def compute_auc_llr(preds_in, preds_ood, preds0_in, preds0_ood):
"""Compute AUC for LLR."""
# check if samples are in the same order
assert np.array_equal(preds_in['labels'], preds0_in['labels'])
assert np.array_equal(preds_ood['labels'], preds0_ood['labels'])
# evaluate AUROC for OOD detection
auc = utils.compute_auc(
preds_in['log_probs'], preds_ood['log_probs'], pos_label=0)
llr_in = preds_in['log_probs'] - preds0_in['log_probs']
llr_ood = preds_ood['log_probs'] - preds0_ood['log_probs']
auc_llr = utils.compute_auc(llr_in, llr_ood, pos_label=0)
return auc, auc_llr
def print_and_write(f, context):
print(context + '\n')
f.write(context + '\n')
def plot_heatmap(n, data, plt_file, colorbar=True):
"""Plot heatmaps (Figure 3 in the paper)."""
sns.set_style('whitegrid')
sns.set(style='ticks', rc={'lines.linewidth': 4})
cmap_reversed = ListedColormap(sns.color_palette('Greys_r', 6).as_hex())
fig, axes = plt.subplots(nrows=n, ncols=n, figsize=(2 * n - 2, 2 * n - 2))
i = 0
for ax in axes.flat:
im = ax.imshow(data[i], vmin=0, vmax=6, cmap=cmap_reversed)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
i += 1
fig.subplots_adjust(right=0.9)
if colorbar:
cbar_ax = fig.add_axes([0.95, 0.15, 0.05, 0.7])
fig.colorbar(im, cax=cbar_ax)
cbar_ax.tick_params(labelsize=20)
with tf.gfile.Open(plt_file, 'wb') as sp:
plt.savefig(sp, format='pdf', bbox_inches='tight')
def main(unused_argv):
# write results to file
out_dir = os.path.join(FLAGS.model_dir, 'results')
tf.compat.v1.gfile.MakeDirs(out_dir)
out_f = tf.compat.v1.gfile.Open(
os.path.join(out_dir, 'run%d.txt' % FLAGS.repeat_id), 'w')
## Find best bkg model using validation datasets (NotMNIST/CIFAR_grey)
# foreground model
preds_in, preds_ood = load_data_and_model_and_pred(FLAGS.exp, FLAGS.data_dir,
0.0, 0.0, FLAGS.repeat_id,
FLAGS.ckpt_step, 'val')
# background model
auc_llr_reg_mr = np.zeros((len(REG_WEIGHT_LIST), len(MUTATION_RATE_LIST)))
for reg_weight in REG_WEIGHT_LIST:
for mutation_rate in MUTATION_RATE_LIST:
preds0_in, preds0_ood = load_data_and_model_and_pred(
FLAGS.exp, FLAGS.data_dir, reg_weight, mutation_rate, FLAGS.repeat_id,
FLAGS.ckpt_step, 'val')
if not (preds0_in and preds0_ood):
print('reg_weight={}, mutation_rate={}, ckpt not found, skip'.format(
reg_weight, mutation_rate))
continue
auc, auc_llr = compute_auc_llr(preds_in, preds_ood, preds0_in, preds0_ood)
auc_llr_reg_mr[REG_WEIGHT_LIST.index(reg_weight),
MUTATION_RATE_LIST.index(mutation_rate)] = auc_llr
print('reg_weight={}, mutation_rate={}, auc_likelihood={}, auc_llr={}'
.format(reg_weight, mutation_rate, auc, auc_llr))
reg_idx, mr_idx = np.unravel_index(auc_llr_reg_mr.argmax(),
auc_llr_reg_mr.shape)
selected_reg = REG_WEIGHT_LIST[reg_idx]
selected_mr = MUTATION_RATE_LIST[mr_idx]
print_and_write(out_f, 'auc_llr_reg_mr={}'.format(auc_llr_reg_mr))
print_and_write(out_f,
'selected reg={}, mr={}'.format(selected_reg, selected_mr))
## Final test on FashionMNIST-MNIST/CIFAR-SVHN
# foreground model
preds_in, preds_ood = load_data_and_model_and_pred(
FLAGS.exp,
FLAGS.data_dir,
0.0,
0.0,
FLAGS.repeat_id,
FLAGS.ckpt_step,
'test',
return_per_pixel=True)
# background model
preds0_in, preds0_ood = load_data_and_model_and_pred(
FLAGS.exp,
FLAGS.data_dir,
selected_reg,
selected_mr,
FLAGS.repeat_id,
FLAGS.ckpt_step,
'test',
return_per_pixel=True)
auc, auc_llr = compute_auc_llr(preds_in, preds_ood, preds0_in, preds0_ood)
print_and_write(out_f, 'final test, auc={}, auc_llr={}'.format(auc, auc_llr))
out_f.close()
# plot heatmaps (Figure 3)
if FLAGS.exp == 'fashion':
n = 4
# FashionMNIST
log_probs_in = preds_in['log_probs']
log_probs_pp_in, log_probs0_pp_in = preds_in[
'log_probs_per_pixel'], preds0_in['log_probs_per_pixel']
n_sample_in = len(log_probs_in)
log_probs_in_sorted = sorted(
range(n_sample_in), key=lambda k: log_probs_in[k], reverse=True)
ids_seq = np.arange(1, n_sample_in, int(n_sample_in / (n * n)))
## pure likelihood
data = [
log_probs_pp_in[log_probs_in_sorted[ids_seq[i]]] + 6
for i in range(n * n)
]
plt_file = os.path.join(
out_dir, 'run%d_heatmap_fashionmnist_p(x).pdf' % FLAGS.repeat_id)
plot_heatmap(n, data, plt_file)
## LLR
data = [
log_probs_pp_in[log_probs_in_sorted[ids_seq[i]]] -
log_probs0_pp_in[log_probs_in_sorted[ids_seq[i]]] for i in range(n * n)
]
plt_file = os.path.join(
out_dir, 'run%d_heatmap_fashionmnist_LLR(x).pdf' % FLAGS.repeat_id)
plot_heatmap(n, data, plt_file)
# MNIST
log_probs_ood = preds_ood['log_probs']
log_probs_pp_ood, log_probs0_pp_ood = preds_ood[
'log_probs_per_pixel'], preds0_ood['log_probs_per_pixel']
n_sample_ood = len(log_probs_ood)
log_probs_ood_sorted = sorted(
range(n_sample_ood), key=lambda k: log_probs_ood[k], reverse=True)
ids_seq = np.arange(1, n_sample_ood, int(n_sample_ood / (n * n)))
## pure likelihood
data = [
log_probs_pp_ood[log_probs_ood_sorted[ids_seq[i]]] + 6
for i in range(n * n)
]
plt_file = os.path.join(out_dir,
'run%d_heatmap_mnist_p(x).pdf' % FLAGS.repeat_id)
plot_heatmap(n, data, plt_file)
## LLR
data = [
log_probs_pp_ood[log_probs_ood_sorted[ids_seq[i]]] -
log_probs0_pp_ood[log_probs_ood_sorted[ids_seq[i]]]
for i in range(n * n)
]
plt_file = os.path.join(out_dir,
'run%d_heatmap_mnist_LLR(x).pdf' % FLAGS.repeat_id)
plot_heatmap(n, data, plt_file)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
71f210792e7bb560ed5a7e5db4e79a338bba1c63 | ba15fac9fbe53578d3823ceeac67fc338e2312e9 | /tests/functional/filters/e2e_filtering.py | 2980783ed05dd42c1c343efb2ed74a65e966af43 | [
"Apache-2.0"
] | permissive | dblenkus/resolwe-bio-py | 0694dcc32f485ccdd5ad43496ccef985c83819c7 | b6610ef26625492f39fdeef846d3e5a89a0009b3 | refs/heads/master | 2021-01-21T03:33:39.831803 | 2020-05-25T14:29:46 | 2020-05-26T07:48:34 | 55,721,959 | 0 | 0 | null | 2016-04-07T19:26:12 | 2016-04-07T19:26:12 | null | UTF-8 | Python | false | false | 3,627 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import unittest
import six
from ..base import FILES_PATH, BaseResdkFunctionalTest
class BaseResdkFilteringTest(BaseResdkFunctionalTest):
def setUp(self):
super().setUp()
self.endpoint = self.res.data
def _get_ids(self, query):
"""Return id's of objects in query."""
return [getattr(elm, "id") for elm in query]
def _check_filter(self, query_args, expected):
response = self._get_ids(self.endpoint.filter(**query_args))
expected = self._get_ids(expected)
six.assertCountEqual(self, response, expected)
@staticmethod
def datetime_to_str(datetime):
return datetime.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
class TestDataFilter(BaseResdkFilteringTest):
def setUp(self):
super().setUp()
self.endpoint = self.res.data
self.data1 = self.res.run(
slug="upload-fasta-nucl",
input={
"src": os.path.join(FILES_PATH, "genome.fasta.gz"),
"species": "Homo sapiens",
"build": "hg38",
},
data_name="Data 1",
)
self.data2 = self.res.run(
slug="upload-fasta-nucl",
input={
"src": os.path.join(FILES_PATH, "genome.fasta.gz"),
"species": "Homo sapiens",
"build": "hg38",
},
data_name="Data 2",
)
def tearDown(self):
super().tearDown()
self.data1.delete(force=True)
self.data2.delete(force=True)
def test_id(self):
self._check_filter({"id": self.data1.id}, [self.data1])
self._check_filter({"id": self.data2.id}, [self.data2])
self._check_filter({"id__in": [self.data1.id]}, [self.data1])
self._check_filter(
{"id__in": [self.data1.id, self.data2.id]}, [self.data1, self.data2]
)
class TestProcessFilter(BaseResdkFilteringTest):
def setUp(self):
super().setUp()
self.endpoint = self.res.process
self.star = self.res.process.get(slug="alignment-star")
self.hisat2 = self.res.process.get(slug="alignment-hisat2")
def test_id(self):
self._check_filter({"id": self.star.id}, [self.star])
self._check_filter({"id": self.hisat2.id}, [self.hisat2])
self._check_filter({"id__in": [self.star.id]}, [self.star])
self._check_filter(
{"id__in": [self.star.id, self.hisat2.id]}, [self.star, self.hisat2]
)
def test_iterate_method(self):
workflows = list(
self.res.process.filter(type="data:workflow").iterate(chunk_size=10)
)
# Use ``assertGreater`` to avoid updating this test each time
# after new workflow is added / removed.
self.assertGreater(len(workflows), 30)
class TestFeatureFilter(BaseResdkFilteringTest):
def setUp(self):
super().setUp()
self.endpoint = self.res.feature
self.ft1 = self.res.feature.get(
source="ENSEMBL", feature_id="id_001", species="Homo sapiens",
)
self.ft2 = self.res.feature.get(
source="ENSEMBL", feature_id="id_002", species="Mus musculus",
)
@unittest.skip("Turn on when one can prepare KnowledgeBase and ES index for it.")
def test_id(self):
self._check_filter({"feature_id": self.ft1.feature_id}, [self.ft1])
self._check_filter(
{"feature_id__in": [self.ft1.feature_id, self.ft2.feature_id]},
[self.ft1, self.ft2],
)
| [
"[email protected]"
] | |
9375ac379d9e628b7e3443a2979108c2d4b929d6 | 781e2692049e87a4256320c76e82a19be257a05d | /all_data/exercism_data/python/sieve/e7fe59df2209451392330ca4b6e3a767.py | 92ce912956b69c6858c7563937f5b3852a638d3c | [] | no_license | itsolutionscorp/AutoStyle-Clustering | 54bde86fe6dbad35b568b38cfcb14c5ffaab51b0 | be0e2f635a7558f56c61bc0b36c6146b01d1e6e6 | refs/heads/master | 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null | UTF-8 | Python | false | false | 408 | py | def sieve(lim):
    # Trial-division sieve: walk the candidate list and drop every multiple of
    # each value. range() is materialized into lists so elements can be
    # removed in place.
    rng = list(range(2, lim + 1))
    output = list(range(2, lim + 1))
    for p in rng:
        count = 0
        while count < len(output):
            if output[count] != p and output[count] % p == 0:
                output.remove(output[count])
            else:
                count += 1
    return output
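
# Example: sieve(10) -> [2, 3, 5, 7]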
| [
"[email protected]"
] | |
394523d7bb019d8ca4af4992d2c5ef47409c3c40 | 88a02059cbf16303dad9745a774230b2e078410f | /1163.py | 4ea1e789406c0eacec92cc075cfdac006f318799 | [] | no_license | nekoTheShadow/my_answers_of_yukicoder | 2e3a94b6dab214fda1ae3c53f879190e6eedc31f | d607dee056a84f08305a51d6eb0bac9faa5d74ed | refs/heads/master | 2022-01-16T06:31:48.776630 | 2022-01-04T08:11:27 | 2022-01-04T08:11:27 | 132,351,091 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 691 | py | n, x = map(int, input().split())
a = list(map(int, input().split()))
b = list(map(int, input().split()))
for i in range(n):
a[i] -= x
m = sum(a)
if m >= 0:
print(0)
exit()
if max(a) < 0:
print(-1)
exit()
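
# dp[i] maps each reachable adjusted total (m plus the shifts applied so far)
# to the minimum cost of reaching it using the first i items; the transition
# below pays b[i] to shift the total by -a[i]. The answer is the cheapest
# state with a non-negative total.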
dp = [{} for _ in range(n+1)]
dp[0][m] = 0
for i in range(n):
for k in dp[i]:
if k in dp[i+1]:
dp[i+1][k] = min(dp[i+1][k], dp[i][k])
else:
dp[i+1][k] = dp[i][k]
if k-a[i] in dp[i+1]:
dp[i+1][k-a[i]] = min(dp[i+1][k-a[i]], dp[i][k]+b[i])
else:
dp[i+1][k-a[i]] = dp[i][k]+b[i]
ans = float('inf')
for k in dp[n]:
if k >= 0:
ans = min(ans, dp[n][k])
print(ans) | [
"[email protected]"
] | |
8b69bf2b7c8280abd5c5fdb09ed901c008f23423 | c744f3ae44ab1a692b4b6a39ce2c3045c81406c4 | /venv/bin/pip | 67066d339c88af6a7e54c8d3ea9ae96a32c1bbb3 | [
"Apache-2.0"
] | permissive | qq529952515/OA | f1e56d37c8e4b35b6f2e9bbdd0fb8370b90cc64d | ccf17b17b3122f9650bb1ab939befad784e9b4e0 | refs/heads/master | 2023-05-07T22:16:04.130146 | 2020-05-17T07:20:59 | 2020-05-17T07:20:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | #!/home/yc/feature_As/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
0f0f5d0b1c85ad0e5682d38e9cb7c54b8cbd0dd4 | 6c29a72dfe9eed38f4667babf74c3ae98983be6f | /tests/test_fixtures.py | 445ff550251b679e46e044775df9352aed857bb4 | [
"Apache-2.0"
] | permissive | ryan-rs/pytest-examples | 0149f79a069a2fb272486d3bf25a756c341f38e8 | 9ca2368c5e86f651497e42bb304415cf2ae3a0df | refs/heads/master | 2020-03-26T15:04:17.374972 | 2018-11-27T16:38:47 | 2018-11-27T16:38:47 | 145,020,977 | 0 | 0 | NOASSERTION | 2018-11-12T19:14:40 | 2018-08-16T17:43:30 | Python | UTF-8 | Python | false | false | 3,540 | py | # -*- coding: utf-8 -*-
# ======================================================================================================================
# Imports
# ======================================================================================================================
import os
import pytest
# ======================================================================================================================
# Fixtures
# ======================================================================================================================
@pytest.fixture
def prefix():
"""A message prefix."""
return 'The start of the message.'
@pytest.fixture
def message():
"""The message"""
return '\nThe message!\n'
@pytest.fixture
def suffix():
"""A message suffix."""
return 'The end of the message.\n'
@pytest.fixture
def static_message_fixture(tmpdir_factory, prefix, message, suffix):
"""A fixture which provides a static message."""
filename = tmpdir_factory.mktemp('data').join('static_message.txt').strpath
file_contents = "{0}{1}{2}".format(prefix, message, suffix)
with open(filename, 'w') as f:
f.write(file_contents)
return filename
@pytest.fixture
def static_message_with_setup_teardown_fixture(tmpdir_factory, prefix, message, suffix):
"""A fixture which provides a static message, but uses a custom setup/teardown."""
# Setup
filename = '/tmp/static_message.txt'
file_contents = "{0}{1}{2}".format(prefix, message, suffix)
with open(filename, 'w') as f:
f.write(file_contents)
# Deliver
yield filename
# Teardown
os.remove(filename)
@pytest.fixture
def dynamic_message_fixture_factory(tmpdir_factory, prefix, suffix):
    """A fixture which provides a factory for dynamic messages."""
filename = tmpdir_factory.mktemp('data').join('dynamic_message.txt').strpath
def _factory(message):
file_contents = "{0}{1}{2}".format(prefix, message, suffix)
with open(filename, 'w') as f:
f.write(file_contents)
return filename
return _factory
# ======================================================================================================================
# Test Cases
# ======================================================================================================================
@pytest.mark.test_id('747ba3e0-aafb-11e8-bfa2-0025227c8120')
@pytest.mark.jira('ASC-891')
def test_static_message(static_message_fixture, prefix, message, suffix):
"""Verify that the file contains the correct message."""
with open(static_message_fixture, 'r') as f:
assert f.read() == "{0}{1}{2}".format(prefix, message, suffix)
@pytest.mark.test_id('747b9fc6-aafb-11e8-bfa2-0025227c8120')
@pytest.mark.jira('ASC-891')
def test_static_message_with_setup_teardown(static_message_with_setup_teardown_fixture, prefix, message, suffix):
"""Verify that the file contains the correct message."""
with open(static_message_with_setup_teardown_fixture, 'r') as f:
assert f.read() == "{0}{1}{2}".format(prefix, message, suffix)
@pytest.mark.test_id('747b9b84-aafb-11e8-bfa2-0025227c8120')
@pytest.mark.jira('ASC-891')
def test_dynamic_message(dynamic_message_fixture_factory, prefix, suffix):
    """Verify that the file contains the correct message."""
    custom_message = 'Wow! Much Custom!'
    with open(dynamic_message_fixture_factory(custom_message), 'r') as f:
assert f.read() == "{0}{1}{2}".format(prefix, custom_message, suffix)
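
# Run these examples with, e.g.:
#   pytest -v tests/test_fixtures.py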
| [
"[email protected]"
] | |
8f3823f30e9fe5b91a24c5a069368f02949d4f3d | a590cb0c9b232ad98d17a9917a36930c6a2c03f8 | /8kyu/Pre-FizzBuzz Workout 1.py | 3362f123258850350be517a6632dd369785ccf29 | [] | no_license | AbbyGeek/CodeWars | 6e10c10cbdb11f2df17a657d11ff5ffa79a5fb0b | 64dddda9f2a14a0592cc946b35302c4bd9bc569e | refs/heads/master | 2020-12-21T00:14:53.665879 | 2020-01-26T01:16:41 | 2020-01-26T01:16:41 | 236,252,030 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51 | py | def pre_fizz(n):
return [x+1 for x in range(n)] | [
"[email protected]"
] | |
485f78bf0f29fec745e66e6f68080ca1aaf408bf | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02574/s928939056.py | 075c9216b7de983528e8012b9dc8198741dc64ee | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | from math import gcd
from functools import reduce
k=10**6+1
def judge(n,a):
c=[0]*k
for x in a:
c[x]+=1 #対応する数の個数を記録
t=any(sum(c[i::i])>1 for i in range(2,k)) #自身を約数に持つ数が2つ以上与えられたリストに存在するような数が一つでもあるかどうか
t+=reduce(gcd,a)>1 #全体について1以外の公約数があれば1加える
return ['pairwise','setwise','not'][t]+' coprime' #全体に公約数があればt=2,全体の公約数が1で公約数持つペアがあればt=1
n=int(input())
a=list(map(int,input().split()))
print(judge(n,a))
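# Examples: a=[3,4,5] -> "pairwise coprime"; a=[6,10,15] -> "setwise coprime"
# (every pair of 6, 10, 15 shares a prime factor, yet gcd(6,10,15)=1)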
| [
"[email protected]"
] | |
9651901898793df56347602451eb1e0ee5a21e22 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03729/s784741556.py | 223dc3c245f8c168dc6e9e30b687ce73313cf6ab | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 110 | py | a, b, c = input().split()
if a[-1] == b[0] and b[-1] == c[0]:
ans = "YES"
else:
ans = "NO"
print(ans)
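# Example: "rng gorilla apple" -> YES (each word ends with the first letter
# of the next word); "yakiniku unagi sushi" -> NO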
| [
"[email protected]"
] | |
1619841e22d47095341a3633936eb0d746446e6c | 1ff31cedc4794083e213e6637deaacab49cfdd9a | /pyatv/mrp/tlv8.py | c8675b1741300017851aee0f599941efce39e7b5 | [
"MIT"
] | permissive | dschu012/pyatv | 910cefec45fcfe94fe9b3fee59672299215db24b | 6496548aee09ff95f5515abb172c1ba19b9d995b | refs/heads/master | 2020-12-04T06:35:25.957921 | 2020-01-01T14:38:54 | 2020-01-01T18:35:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,613 | py | """Implementation of TLV8 used by MRP/HomeKit pairing process.
Note that this implementation only supports one level of value, i.e. no dicts
in dicts.
"""
# Some of the defined tags used by the pairing process
TLV_METHOD = '0'
TLV_IDENTIFIER = '1'
TLV_SALT = '2'
TLV_PUBLIC_KEY = '3'
TLV_PROOF = '4'
TLV_ENCRYPTED_DATA = '5'
TLV_SEQ_NO = '6'
TLV_ERROR = '7'
TLV_BACK_OFF = '8'
TLV_SIGNATURE = '10'
def read_tlv(data):
"""Parse TLV8 bytes into a dict.
If value is larger than 255 bytes, it is split up in multiple chunks. So
the same tag might occurr several times.
"""
def _parse(data, pos, size, result=None):
if result is None:
result = {}
if pos >= size:
return result
tag = str(data[pos])
length = data[pos+1]
value = data[pos+2:pos+2+length]
if tag in result:
result[tag] += value # value > 255 is split up
else:
result[tag] = value
return _parse(data, pos+2+length, size, result)
return _parse(data, 0, len(data))
def write_tlv(data):
"""Convert a dict to TLV8 bytes."""
tlv = b''
for key, value in data.items():
tag = bytes([int(key)])
length = len(value)
pos = 0
        # A value longer than 255 bytes is written as multiple items under the
        # same tag and concatenated back into one buffer when reading the TLV.
while pos < len(value):
size = min(length, 255)
tlv += tag
tlv += bytes([size])
tlv += value[pos:pos+size]
pos += size
length -= size
return tlv
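

# Minimal round-trip sketch: a value longer than 255 bytes is written as
# several chunks under the same tag and re-assembled by read_tlv.
if __name__ == '__main__':
    packet = write_tlv({TLV_SALT: b'\x00' * 300})
    assert read_tlv(packet)[TLV_SALT] == b'\x00' * 300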
| [
"[email protected]"
] | |
6beaf98b5e814440dd35939b8f93ddfce0c683ae | 51108a50ffb48ad154f587c230045bb783f22240 | /bflib/items/writing/base.py | 44c7befeebcb6caac2b247b0ad366f89b2244aba | [
"MIT"
] | permissive | ChrisLR/BasicDungeonRL | c90bd0866c457557cccbad24e14689d5d6db7b00 | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | refs/heads/master | 2021-06-15T13:56:53.888646 | 2019-08-05T16:33:57 | 2019-08-05T16:33:57 | 104,269,987 | 3 | 0 | MIT | 2019-08-05T16:28:23 | 2017-09-20T21:35:19 | Python | UTF-8 | Python | false | false | 125 | py | from bflib.items import listing
from bflib.items.base import Item
@listing.register_type
class WritingItem(Item):
pass
| [
"[email protected]"
] | |
e075f370a97eb97ee3fc17ee4c6814539f1e7217 | c22253c12dbcd0332a97374eb556e5a38844c893 | /lib/plugin/logic.py | 7680f611e22cce67c6675c604f1da3c469c9e911 | [] | no_license | passdacom/SJVA3 | eb7a6731a4edce763757949845247d2f4ec2718c | f204804d983b7d96f56a82abd39b3f3bb3d25ab5 | refs/heads/main | 2023-08-31T21:55:39.679826 | 2021-10-19T03:00:32 | 2021-10-19T03:00:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,113 | py | # -*- coding: utf-8 -*-
#########################################################
# python
import os
import traceback
import time
import threading
import platform
# third-party
# sjva shared
from framework import db, scheduler
from framework.job import Job
from framework.util import Util
#########################################################
class Logic(object):
db_default = {
'recent_menu_plugin' : '',
}
def __init__(self, P):
self.P = P
def plugin_load(self):
try:
self.P.logger.debug('%s plugin_load', self.P.package_name)
self.db_init()
for module in self.P.module_list:
module.migration()
for module in self.P.module_list:
module.plugin_load()
if module.sub_list is not None:
for sub_name, sub_instance in module.sub_list.items():
sub_instance.plugin_load()
if self.P.ModelSetting is not None:
for module in self.P.module_list:
key = f'{module.name}_auto_start'
if self.P.ModelSetting.has_key(key) and self.P.ModelSetting.get_bool(key):
self.scheduler_start(module.name)
if module.sub_list is not None:
for sub_name, sub_instance in module.sub_list.items():
key = f'{module.name}_{sub_name}_auto_start'
if self.P.ModelSetting.has_key(key) and self.P.ModelSetting.get_bool(key):
self.scheduler_start_sub(module.name, sub_name)
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def db_init(self):
try:
if self.P.ModelSetting is None:
return
for key, value in Logic.db_default.items():
if db.session.query(self.P.ModelSetting).filter_by(key=key).count() == 0:
db.session.add(self.P.ModelSetting(key, value))
for module in self.P.module_list:
if module.sub_list is not None:
for name, sub_instance in module.sub_list.items():
if sub_instance.db_default is not None:
for key, value in sub_instance.db_default.items():
if db.session.query(self.P.ModelSetting).filter_by(key=key).count() == 0:
db.session.add(self.P.ModelSetting(key, value))
if module.db_default is not None:
for key, value in module.db_default.items():
if db.session.query(self.P.ModelSetting).filter_by(key=key).count() == 0:
db.session.add(self.P.ModelSetting(key, value))
db.session.commit()
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def plugin_unload(self):
try:
self.P.logger.debug('%s plugin_unload', self.P.package_name)
for module in self.P.module_list:
module.plugin_unload()
if module.sub_list is not None:
for sub_name, sub_instance in module.sub_list.items():
sub_instance.plugin_unload()
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def scheduler_start(self, sub):
try:
job_id = '%s_%s' % (self.P.package_name, sub)
module = self.get_module(sub)
job = Job(self.P.package_name, job_id, module.get_scheduler_interval(), self.scheduler_function, module.get_scheduler_desc(), False, args=sub)
scheduler.add_job_instance(job)
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def scheduler_stop(self, sub):
try:
job_id = '%s_%s' % (self.P.package_name, sub)
scheduler.remove_job(job_id)
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def scheduler_function(self, sub):
try:
module = self.get_module(sub)
module.scheduler_function()
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def reset_db(self,sub):
try:
module = self.get_module(sub)
return module.reset_db()
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def one_execute(self, sub):
self.P.logger.debug('one_execute :%s', sub)
try:
job_id = '%s_%s' % (self.P.package_name, sub)
if scheduler.is_include(job_id):
if scheduler.is_running(job_id):
ret = 'is_running'
else:
scheduler.execute_job(job_id)
ret = 'scheduler'
else:
def func():
time.sleep(2)
self.scheduler_function(sub)
threading.Thread(target=func, args=()).start()
ret = 'thread'
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
ret = 'fail'
return ret
def immediately_execute(self, sub):
self.P.logger.debug('immediately_execute :%s', sub)
try:
def func():
time.sleep(1)
self.scheduler_function(sub)
threading.Thread(target=func, args=()).start()
            ret = {'ret':'success', 'msg':'Executing.'}
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
ret = {'ret' : 'danger', 'msg':str(exception)}
return ret
def get_module(self, sub):
try:
for module in self.P.module_list:
if module.name == sub:
return module
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def process_telegram_data(self, data, target=None):
try:
for module in self.P.module_list:
if target is None or target.startswith(module.name):
module.process_telegram_data(data, target=target)
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
#######################################################
    # Sub-related functions under the plugin - module - sub structure
def scheduler_start_sub(self, module_name, sub_name):
try:
#self.P.logger.warning('scheduler_start_sub')
job_id = f'{self.P.package_name}_{module_name}_{sub_name}'
ins_module = self.get_module(module_name)
ins_sub = ins_module.sub_list[sub_name]
job = Job(self.P.package_name, job_id, ins_sub.get_scheduler_interval(), ins_sub.scheduler_function, ins_sub.get_scheduler_desc(), False, args=None)
scheduler.add_job_instance(job)
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def scheduler_stop_sub(self, module_name, sub_name):
try:
job_id = f'{self.P.package_name}_{module_name}_{sub_name}'
scheduler.remove_job(job_id)
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def scheduler_function_sub(self, module_name, sub_name):
try:
ins_module = self.get_module(module_name)
ins_sub = ins_module.sub_list[sub_name]
ins_sub.scheduler_function()
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
def one_execute_sub(self, module_name, sub_name):
try:
job_id = f'{self.P.package_name}_{module_name}_{sub_name}'
if scheduler.is_include(job_id):
if scheduler.is_running(job_id):
ret = 'is_running'
else:
scheduler.execute_job(job_id)
ret = 'scheduler'
else:
def func():
time.sleep(2)
self.scheduler_function_sub(module_name, sub_name)
threading.Thread(target=func, args=()).start()
ret = 'thread'
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
ret = 'fail'
return ret
def immediately_execute_sub(self, module_name, sub_name):
self.P.logger.debug(f'immediately_execute : {module_name} {sub_name}')
try:
def func():
time.sleep(1)
self.scheduler_function_sub(module_name, sub_name)
threading.Thread(target=func, args=()).start()
            ret = {'ret':'success', 'msg':'Executing.'}
except Exception as exception:
self.P.logger.error('Exception:%s', exception)
self.P.logger.error(traceback.format_exc())
ret = {'ret' : 'danger', 'msg':str(exception)}
return ret | [
"[email protected]"
] | |
c37fddbe72a4bf5e7895fef5d2695c5dec44a3c9 | f3c2fa4c6ef32e01b98ac56a2e25419152d69208 | /gpio-utils/radiosimulator.py | 2c6810bd11fb90a6e076c1bbac1c5bd5e6bd0e97 | [
"MIT"
] | permissive | deets/brombeerquark | bd4687d42f9466cd5f6843df6a49e647cf3e2fcc | 9314bc6adaf19ee3868612c8aafdce0f1ebbabb9 | refs/heads/master | 2021-07-19T07:02:22.427227 | 2021-02-28T12:45:57 | 2021-02-28T12:45:57 | 47,883,045 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,399 | py | from __future__ import print_function
from functools import partial
import time
import threading
import Queue
from tfa import TimedFiniteAutomaton
def simulate_gpio_events(queue):
time.sleep(1.0)
# just increase volume by pressing/releasing once
queue.put("volume+pressed")
queue.put("volume+released")
time.sleep(3.0)
# now just hold volume+pressed til 11!
queue.put("volume+pressed")
time.sleep(7.0)
queue.put("volume+released")
# now just hold volume-pressed til we are back to 1
queue.put("volume-pressed")
time.sleep(7.0)
queue.put("volume-released")
# finally, toggle play/pause
queue.put("volume-pressed")
time.sleep(0.1)
queue.put("volume+pressed")
# let go of both buttons
queue.put("volume-release")
queue.put("volume+released")
class Radio(object):
MINVOL, MAXVOL = 1, 11
SAME_TIME_THRESHOLD = .3
def __init__(self):
self._volume = self.MINVOL
self.playing = True
automat = TimedFiniteAutomaton("idle")
automat.add_state("volume_up")
automat.add_state("volume_down")
automat.add_state("nudge_up")
automat.add_state("nudge_down")
automat.add_state("volume_up_or_toggle")
automat.add_state("volume_down_or_toggle")
automat.add_state("toggle_play_pause")
# waiting for either volume change or toggling play/pause
automat.add_transition("idle", "volume_up_or_toggle", "volume+pressed")
automat.add_transition("idle", "volume_down_or_toggle", "volume-pressed")
        # after self.SAME_TIME_THRESHOLD seconds, we will transition to volume up/down
        # we will re-enter the state on .5s timer events to keep changing the volume
automat.add_transition("volume_up_or_toggle", "volume_up", self.SAME_TIME_THRESHOLD)
automat.add_transition("volume_down_or_toggle", "volume_down", self.SAME_TIME_THRESHOLD)
automat.add_transition("volume_up", "volume_up", .5)
automat.add_transition("volume_down", "volume_down", .5)
automat.add_transition("volume_up", "idle", "volume+released")
automat.add_transition("volume_down", "idle", "volume-released")
# when we wait for toggle_play_pause, but already release,
# just nudge the volume once in the respective direction!
automat.add_transition("volume_up_or_toggle", "nudge_up", "volume+released")
automat.add_transition("nudge_up", "idle")
automat.add_transition("volume_down_or_toggle", "nudge_down", "volume-released")
automat.add_transition("nudge_down", "idle")
# if within this timeframe the opposite key was pressed, toggle!
automat.add_transition("volume_up_or_toggle", "toggle_play_pause", "volume-pressed")
automat.add_transition("volume_down_or_toggle", "toggle_play_pause", "volume+pressed")
# from play_pause, transition automatically back to idle
automat.add_transition("toggle_play_pause", "idle")
self._automat = automat
self._automat.add_state_change_listener(self._react_to_state_changes)
print(automat.dot())
def _react_to_state_changes(self, _from, to, _on):
if to in ("volume_up", "nudge_up"):
self.volume += 1
elif to in ("volume_down", "nudge_down"):
self.volume -= 1
elif to == "toggle_play_pause":
self.playing = not self.playing
@property
def volume(self):
return self._volume
@volume.setter
def volume(self, value):
self._volume = min(max(value, self.MINVOL), self.MAXVOL)
def run(self):
q = Queue.Queue()
t = threading.Thread(target=partial(simulate_gpio_events, q))
t.daemon = True
t.start()
self._automat.add_state_change_listener(self._print_status)
while True:
try:
event = q.get(block=True, timeout=.1)
except Queue.Empty: #timeout
self._automat.tick()
else:
print("feed", event)
self._automat.feed(event)
def _print_status(self, *_a):
print("Playing: {}, Volume: {}, State: {} ".format(
self.playing,
self.volume,
self._automat.state,
)
)
def main():
radio = Radio()
radio.run()
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
342f10467123051dda12ff9cfcfa59cb2048ea18 | 7bdc1a3565ba8964658a749fb05ddc27f20d0a33 | /scripts/mvn_incremental | d92faf0f5cebe8ccda4cf17fbd1335426c57b1b4 | [] | no_license | wuan/ci-tools | 375cd64d0197e7e482255661c998f11e06c3e404 | 6796ee0a9f0b11a4c4ac7c05bdad047e6edd3313 | refs/heads/master | 2020-12-24T16:24:02.820016 | 2016-03-04T07:33:19 | 2016-03-04T07:33:19 | 39,160,905 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,229 | #!/usr/bin/env python
# coding=utf-8
"""
Copyright 2015 Andreas Würl
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from optparse import OptionParser
import os

from junit_xml import TestSuite

from citools.maven import Pom
# Assumption: Persistence ships with citools alongside Pom; the exact module
# path is a guess, since the script uses the name without importing it.
from citools.persistence import Persistence
if __name__ == '__main__':
pom = Pom(os.path.join(os.getcwd(), "pom.xml"))
modules = set(pom.modules)
parser = OptionParser()
(options, args) = parser.parse_args()
if len(args) == 1:
target = args[0]
persistence = Persistence(target + '.db')
report = persistence.report
if report is not None:
with open('junit.xml', 'w') as junit_result_file:
TestSuite.to_file(junit_result_file, report.test_suites, False, "latin1")
| [
"[email protected]"
] | ||
03d53a2fa862909aa754424596e338d8baa4977a | 03cbc74c3b5c3147e2a5ccfe668594350ac32e09 | /lib/game_controller.py | 9ce9f53324d0447b7aa41f4d4860de35d7426544 | [] | no_license | yapo/scoreboard | 51e1b5ae07ad23390b920c2a8cad13f4e9e71a4f | f35457125e377d19d912509b3b7c7749fc5a15aa | refs/heads/master | 2021-01-18T12:05:53.859445 | 2014-08-30T21:33:58 | 2014-08-30T21:33:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,895 | py | import sys
class game_controller(object):
def __init__(self, config):
self.config = config
self.player1 = player('white')
self.player2 = player('black')
self.last_scored_player = None
self.victory_callbacks = []
self.danger_zone_callbacks = []
self.combo_breaker_callbacks = []
self.first_blood_callbacks = []
def reset(self):
self.player1.reset()
self.player2.reset()
def score(self, player_label):
# identify the players
player = self.player1 if player_label == 'white' else self.player2
other_player = self.player2 if player == self.player1 else self.player1
is_combo_breaker = other_player.combo_counter > 2
if player is not self.last_scored_player:
player.combo_breaker()
other_player.combo_breaker()
self.last_scored_player = player
# score
player.score()
self.player1.show_score()
self.player2.show_score()
# raise game events
if player.goal_counter == self.config.max_goals:
self.victory(player)
elif player.goal_counter == 1 and other_player.goal_counter == 0:
self.execute_callbacks(self.first_blood_callbacks)
elif player.goal_counter == self.config.max_goals - 1:
self.execute_callbacks(self.danger_zone_callbacks)
elif is_combo_breaker:
self.execute_callbacks(self.combo_breaker_callbacks)
def add_handler(self, event_name, handler = None):
callbacks = { 'victory': self.victory_callbacks,
'danger_zone': self.danger_zone_callbacks,
'first_blood': self.first_blood_callbacks,
'combo_breaker': self.combo_breaker_callbacks
}
if event_name in callbacks:
callbacks[event_name].append(handler)
return len(callbacks[event_name]) - 1
else:
raise Exception('non valid event name: {}'.format(event_name))
def execute_callbacks(self, callbacks):
winner = self.get_winner()
loser = self.player1 if not self.player1 == winner else self.player2
for callback in callbacks:
if callback is not None:
callback(winner, loser)
def victory(self, player):
print "victory ... player {} wins".format(player.label)
player.winner = True
self.execute_callbacks(self.victory_callbacks)
def get_winner(self):
return self.player1 if self.player1.goal_counter >= self.player2.goal_counter else self.player2
def get_scored_player(self):
return self.last_scored_player
def get_other_player(self, player):
return self.player1 if player is not self.player1 else self.player2
class player(object):
def __init__(self, label):
self.label = label
self.goal_counter = 0
self.combo_counter = 0
self.winner = False
def reset(self):
print "{}: reset".format(self.label)
self.goal_counter = 0
self.combo_counter = 0
def score(self):
self.goal_counter += 1
self.combo_counter += 1
def show_score(self):
print "{}: score - {}: combos {}".format(self.label, self.goal_counter, self.combo_counter)
def combo_breaker(self):
self.combo_counter = 0
| [
"root@raspberrypi.(none)"
] | root@raspberrypi.(none) |
b7c52a665189f194d3ae5023e42cd32fef26b8e8 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/5/lqi.py | c5b5b09a028ae3b0d4730ed13c933d52455fff36 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'lQI':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
dff6b6a5b2d3975ef21308b798a270290acf6b65 | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/twisted-Zope-3.2.1/twisted/vfs/pathutils.py | a5eb45e442945312b73e304ec7158ad550551c8a | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,390 | py | from zope.interface import Interface, Attribute, implements
def getAbsoluteSegments(path, cwd='/'):
"""
@param path: either a string or a list of string segments
    which specifies the desired path; it may be relative to the cwd
@param cwd: optional string specifying the current working directory
returns a list of string segments which most succinctly
describe how to get to path from root
"""
if not isinstance(path, list): paths = path.split("/")
else: paths = path
if len(paths) and paths[0] == "":
paths = paths[1:]
else:
paths = cwd.split("/") + paths
result = []
for path in paths:
if path == "..":
if len(result) > 1:
result = result[:-1]
else:
result = []
elif path not in ("", "."):
result.append(path)
return result
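# Examples: getAbsoluteSegments('../b', cwd='/a/x') == ['a', 'b'];
# getAbsoluteSegments('/a//./b') == ['a', 'b']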
def fetch(root, path, cwd='/'):
"""
@param root: IFileSystemContainer which represents the root node
of the filesystem
@param path: either a string or a list of string segments
    which specifies the desired path; it may be relative to the cwd
@param cwd: optional string specifying the current working directory
returns node described by path relative to the cwd
"""
paths = getAbsoluteSegments(path, cwd)
currNode = root
for path in paths:
currNode = currNode.child(path)
return currNode
def basename(path, cwd='/'):
return getAbsoluteSegments(path, cwd)[-1]
def dirname(path, cwd='/'):
return "/" + "/".join(getAbsoluteSegments(path, cwd)[:-1])
def getRoot(node):
while node.parent is not node:
node = node.parent
return node
def getSegments(node):
ret = []
while node.parent is not node:
ret.append(node.name)
node = node.parent
ret.reverse()
return ret
class IFileSystem(Interface):
root = Attribute("root IFileSystemNode of the IFileSystem")
pathToCWD = Attribute("path to current working directory")
def absPath(path):
"""
returns a normalized absolutized version of the pathname path
"""
def splitPath(path):
"""
returns a normalized absolutized version of the pathname path
        split on the filesystem's directory separator
"""
def joinPath(tail, head):
"""
joins the two paths, tail and head
"""
def dirname(path):
"""
returns the directory name of the container for path
"""
def basename(path):
"""
returns the base name of pathname path
"""
def fetch(path):
"""
returns a node object representing the file with pathname path
"""
def _getImplicitChildren(dir):
"""
returns implicit children for a given dir
this is placed in the filesystem so that the same
directory can have different implicit children depending
on what sort of filesystem it has been placed in
- may not be the best idea ...
returns a list of 2 element tuples:
[ ( path, nodeObject ) ]
eg.
[ ( ".", dir ), ( "..", dir.parent ) ]
"""
class FileSystem:
"""
Wraps unix-like VFS backends, in which directory separator is '/',
root's path is '/', and all directories have '.' and '..'.
Effectively, this is just a convenience wrapper around the other
functions in this module which remembers the root node and the
current working directory.
"""
implements(IFileSystem)
def __init__(self, root, pathToCWD="/"):
self.root = root
self.root.filesystem = self
self.pathToCWD = pathToCWD
def absPath(self, path):
return "/" + "/".join(self.splitPath(path))
def splitPath(self, path):
return getAbsoluteSegments(path, self.pathToCWD)
def joinPath(self, tail, head):
if tail == "/":
return tail + head
else:
return tail + "/" + head
def dirname(self, path):
return dirname(path, self.pathToCWD)
def basename(self, path):
return basename(path, self.pathToCWD)
def fetch(self, pathToFile="."):
return fetch(self.root, pathToFile, self.pathToCWD)
def _getImplicitChildren(self, dir):
return [(".", dir), ("..", dir.parent)]
| [
"[email protected]"
] | |
2e37a656e0edae2639a95df75b96978d92948395 | 6c547e3312e2d1bd3dab123b831053ed7aef7b6d | /pages/MYCL/gain_loss/realized.py | cdacebeed29d6225098e87c9886c149079ecb7fb | [] | no_license | kenito2050/BICL | 8c4239f1e897e4dfc04aa35e827816242b41d5dd | 82891aba56cc49c9cf96ce82472847c4cb10828f | refs/heads/master | 2020-12-31T22:10:44.784193 | 2020-02-10T23:00:10 | 2020-02-10T23:00:10 | 239,039,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,363 | py | from selenium.webdriver.common.by import By
from config_globals import *
class realized():
def __init__(self, driver):
self.driver = driver
def Page_Elements(self):
# Table Header
self.table_header = self.driver.find_element(By.XPATH, "/html/body/div[1]/div[3]/div/div/ui-view/div/div[2]/div[3]/div/table")
# Table
self.table = self.driver.find_element(By.XPATH, "/html/body/div[1]/div[3]/div/div/ui-view/div/div[2]/div[3]/div/table/tbody")
return self
# Actions
def verify_Total_Displays(self, test_case_ID, browser, env, time_stamp):
columns = self.driver.find_elements(By.XPATH,
"/html/body/div[1]/div[3]/div/div/ui-view/div/div[2]/div[5]/div/table/tbody")
        # Only the first matched element is checked; the original loop broke
        # after a single iteration regardless of the outcome.
        text_displays = bool(columns) and "Total" in columns[0].text
try:
assert text_displays is True
except AssertionError:
screenshot_name = "FAIL" + "_" + test_case_ID + "_" + browser + "_" + env + "_" + time_stamp + ".png"
saved_screenshot_location = str(screenshot_directory / screenshot_name)
self.driver.get_screenshot_as_file(saved_screenshot_location)
raise | [
"[email protected]"
] | |
7088ef175232c41f87a46face5ef3c3f34a5927d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02256/s464205146.py | 66f0c6b76ec4f68636fea32269d78178a8e85ded | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 778 | py | a, b = map(int, input().split())
c = []
if a > b:
a, b = b, a
if b%a == 0:
print(a)
else:
while True:
for i in range(a):
x = i + 2
#print(a, x)
if a%x == 0:
if b%x == 0:
c.append(x)
a = a//x
b = b//x
#print(c)
break
                elif b%(a//x) == 0:
                    d = a//x  # complementary divisor of a; it also divides b
                    c.append(d)
                    b = b//d
                    a = x
                    #print(c)
                    break
#if x%1000 == 0:
#print(x)
if x > a**0.5:
break
if x > a**0.5:
break
s = 1
for j in c:
s = s * j
print(s)
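# Example: input "147 105" prints 21, their greatest common divisor.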
| [
"[email protected]"
] | |
e5889701fc9377bce8eda18387d6adaea7a0c042 | 50bd113a98b1657d735a933c0fcc446dd8c35e3e | /companies/migrations/0011_auto_20190807_0332.py | 59e7718a743d90f137978161d15d48a3cf61152f | [
"Apache-2.0"
] | permissive | fuseumass/hackerforce | ab0d7e2fcb0c25276eac977fd628a0c67411e059 | dfb6ac1304a7db21853765de9da795e8e9ef20bf | refs/heads/development | 2022-12-23T09:44:38.319260 | 2019-12-05T00:11:59 | 2019-12-28T06:22:01 | 194,482,639 | 13 | 7 | Apache-2.0 | 2022-12-08T03:16:40 | 2019-06-30T06:21:44 | CSS | UTF-8 | Python | false | false | 575 | py | # Generated by Django 2.2.3 on 2019-08-07 07:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('companies', '0010_auto_20190807_0322'),
]
operations = [
migrations.AlterField(
model_name='industry',
name='color',
field=models.CharField(choices=[('blue', 'blue'), ('green', 'green'), ('purple', 'purple'), ('orange', 'orange'), ('yellow', 'yellow'), ('red', 'red'), ('brown', 'brown'), ('pink', 'pink'), ('gray', 'gray')], max_length=10),
),
]
| [
"[email protected]"
] | |
88065cf2fd2349c82ab8c1843bf968f43a975af1 | 98be00ee32971cade82d10c067aff532c3394a62 | /geeksforgeeks/linked_list_merge_sort.py | 200e6d2c43b2057dfa6cccd4818f7680ebebc6f6 | [] | no_license | vigneshhari/Competitive_solutions | 5ab34933ea8d84eab67bdef9bb9e4562f6b90782 | 7a35e1386e5cff71cb5746b6797ccc0f03ceb3f4 | refs/heads/master | 2023-01-11T02:53:01.456863 | 2022-12-29T13:50:03 | 2022-12-29T13:50:03 | 115,146,700 | 4 | 2 | null | 2019-10-26T09:15:03 | 2017-12-22T20:03:51 | Python | UTF-8 | Python | false | false | 391 | py |
class ll:
next = None
def __init__(self,val):
self.val = val
def setnext(self,next):
self.next = next
def stringll(node):
    if node is None: return ""
return str(node.val) + " " + stringll(node.next)
head = ll(-1)
looper = head
for i in range(input()):
temp = ll(input())
looper.setnext(temp)
looper = looper.next
print stringll(head)
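
# The filename promises a merge sort, which the stored snippet never defines.
# A minimal sketch (assumption: sort the nodes after the -1 sentinel head in
# ascending order of val):
def merge(a, b):
    if a is None: return b
    if b is None: return a
    if a.val <= b.val:
        a.next = merge(a.next, b)
        return a
    b.next = merge(a, b.next)
    return b

def merge_sort(node):
    if node is None or node.next is None:
        return node
    slow, fast = node, node.next   # split the list with slow/fast pointers
    while fast is not None and fast.next is not None:
        slow = slow.next
        fast = fast.next.next
    mid = slow.next
    slow.next = None
    return merge(merge_sort(node), merge_sort(mid))

head.next = merge_sort(head.next)
print stringll(head)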
| [
"[email protected]"
] |