Dataset schema (one row per column: name, dtype, min, max):

column                                     dtype            min        max
------------------------------------------ ---------------- ---------- ----------
hexsha                                     stringlengths    40         40
size                                       int64            5          2.06M
ext                                        stringclasses    11 values
lang                                       stringclasses    1 value
max_stars_repo_path                        stringlengths    3          251
max_stars_repo_name                        stringlengths    4          130
max_stars_repo_head_hexsha                 stringlengths    40         78
max_stars_repo_licenses                    sequencelengths  1          10
max_stars_count                            int64            1          191k
max_stars_repo_stars_event_min_datetime    stringlengths    24         24
max_stars_repo_stars_event_max_datetime    stringlengths    24         24
max_issues_repo_path                       stringlengths    3          251
max_issues_repo_name                       stringlengths    4          130
max_issues_repo_head_hexsha                stringlengths    40         78
max_issues_repo_licenses                   sequencelengths  1          10
max_issues_count                           int64            1          116k
max_issues_repo_issues_event_min_datetime  stringlengths    24         24
max_issues_repo_issues_event_max_datetime  stringlengths    24         24
max_forks_repo_path                        stringlengths    3          251
max_forks_repo_name                        stringlengths    4          130
max_forks_repo_head_hexsha                 stringlengths    40         78
max_forks_repo_licenses                    sequencelengths  1          10
max_forks_count                            int64            1          105k
max_forks_repo_forks_event_min_datetime    stringlengths    24         24
max_forks_repo_forks_event_max_datetime    stringlengths    24         24
content                                    stringlengths    1          1.05M
avg_line_length                            float64          1          1.02M
max_line_length                            int64            3          1.04M
alphanum_fraction                          float64          0          1
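A minimal sketch for loading and inspecting a dump with this schema, assuming it is distributed as a Hugging Face dataset (the repository id below is hypothetical; substitute the real one):

from datasets import load_dataset

# hypothetical dataset id -- the schema above matches a "The Stack"-style
# Python code corpus with per-file repo/star/issue/fork metadata
ds = load_dataset("some-org/python-code-dump", split="train")

row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_count"])
print(row["content"][:200])  # first 200 characters of the source file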
d4c5d7225aa1d551d6744fefbde6bc3d8b9f8cc2
3,220
py
Python
computation/Tests/Jetson/TF_model.py
y-x-c/Heliot
b98646966fd1d437e308abeed59668df640932de
[ "BSD-3-Clause" ]
4
2019-09-19T15:36:22.000Z
2020-02-18T09:28:54.000Z
computation/Tests/Jetson/TF_model.py
y-x-c/Heliot
b98646966fd1d437e308abeed59668df640932de
[ "BSD-3-Clause" ]
null
null
null
computation/Tests/Jetson/TF_model.py
y-x-c/Heliot
b98646966fd1d437e308abeed59668df640932de
[ "BSD-3-Clause" ]
2
2020-04-14T19:11:32.000Z
2022-01-08T18:59:02.000Z
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
from distutils.version import StrictVersion
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import json
import time
import cv2

PATH_TO_FROZEN_GRAPH = '../data/mobilenet_v2_1.4_224/mobilenet_v2_1.4_224_frozen.pb'

info = 'Time taken to load Model into memory:'
start_time = time.time()
detection_graph = tf.Graph()
with detection_graph.as_default():
    od_graph_def = tf.GraphDef()
    with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
        serialized_graph = fid.read()
        od_graph_def.ParseFromString(serialized_graph)
        tf.import_graph_def(od_graph_def, name='')
end_time = time.time()
time_taken = end_time - start_time
print(info, time_taken)

# Load the labels
# Load categories
categories = []
with open('../data/' + 'categories.txt', 'r') as f:
    for line in f:
        cat = line.split('\n')[0]
        if cat != 'classes':
            categories.append(cat)
    f.close()
print('Number of categories:', len(categories))

# Load image size
with open('../data/' + 'inputsize.txt', 'r') as f:
    reqsize = int(f.readline().split('\n')[0])
#print(reqsize)

#image_filename = '../data/' + 'image1.jpg'
sess = tf.Session(graph=detection_graph)
image_filename = '../data/' + 'Tiger.jpg'
img = Load_and_process_img(image_filename)
key_name = 'MobilenetV2/Predictions/Reshape_1'
result, time_taken = run_inference_b1(key_name, img, detection_graph, 1000)
print('Time Taken to run Inference is:', time_taken)
print(result)
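The script calls Load_and_process_img and run_inference_b1, which did not survive in this dump. A rough sketch of what they would have to do, under TF1-style assumptions (the input tensor name and preprocessing scale are guesses, not the author's code):

def Load_and_process_img(image_filename):
    """Hypothetical reconstruction: read an image, resize it to the model's
    input size and add a batch dimension."""
    img = cv2.imread(image_filename)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (reqsize, reqsize)).astype(np.float32)
    img = (img / 128.0) - 1.0  # MobileNet-style [-1, 1] scaling (assumed)
    return np.expand_dims(img, axis=0)

def run_inference_b1(key_name, img, graph, num_runs):
    """Hypothetical reconstruction: run the output tensor num_runs times and
    report the average latency."""
    out_tensor = graph.get_tensor_by_name(key_name + ':0')
    in_tensor = graph.get_tensor_by_name('input:0')  # input name is an assumption
    start = time.time()
    for _ in range(num_runs):
        result = sess.run(out_tensor, feed_dict={in_tensor: img})
    return result, (time.time() - start) / num_runs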
26.178862
100
0.700621
d4c6aa1d03e45cbedd11a4f0d5c301600877fac8
1,326
py
Python
frappe/patches/v13_0/update_date_filters_in_user_settings.py
chentaoz/frappe
ee3c4943bf6177ad3b410cdb0d802af486751a65
[ "MIT" ]
3
2017-12-09T22:05:11.000Z
2019-10-22T12:03:43.000Z
frappe/patches/v13_0/update_date_filters_in_user_settings.py
chentaoz/frappe
ee3c4943bf6177ad3b410cdb0d802af486751a65
[ "MIT" ]
17
2021-03-22T18:47:14.000Z
2022-03-15T12:21:00.000Z
frappe/patches/v13_0/update_date_filters_in_user_settings.py
chentaoz/frappe
ee3c4943bf6177ad3b410cdb0d802af486751a65
[ "MIT" ]
2
2021-05-06T06:14:40.000Z
2021-05-06T10:05:29.000Z
from __future__ import unicode_literals

import frappe, json

from frappe.model.utils.user_settings import update_user_settings, sync_user_settings
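Only the imports of this patch survive in the dump. Frappe runs patch modules through a module-level execute(); a neutral skeleton of what the elided body would plug into (an assumption, not the author's code):

def execute():
    # Rewrite legacy date filters inside stored user settings, then persist
    # them with update_user_settings() and propagate via sync_user_settings().
    pass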
24.109091
85
0.659879
d4c78d441d23d25b49b17e8da38c99500cd4ebd4
3,993
py
Python
miniproject/train.py
peguerosdc/ml4phy-quantum-oscillators
5ce2cc8ea9ad00e23dab45d898e51f484fca5934
[ "MIT" ]
null
null
null
miniproject/train.py
peguerosdc/ml4phy-quantum-oscillators
5ce2cc8ea9ad00e23dab45d898e51f484fca5934
[ "MIT" ]
null
null
null
miniproject/train.py
peguerosdc/ml4phy-quantum-oscillators
5ce2cc8ea9ad00e23dab45d898e51f484fca5934
[ "MIT" ]
1
2021-07-18T11:11:46.000Z
2021-07-18T11:11:46.000Z
import BoltzmannMachine as bm
import QHO as qho
import numpy as np
import datetime

# Visualization imports
from IPython.display import clear_output
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams['figure.dpi'] = 300

# Set the quantum gas with N particles, a limit of 10 for the
# quantum numbers and default temperature and frequency
N = 10*10
gas = qho.QHOGas(N=N)
n_max = 10
training_size = 100000
# the amount of hidden units was set by trial and error
hidden_units = 70
# the recipe suggests to set the batchsize to 10, though it can range
# from 10 to 100
batchsize = 10
# the recipe suggests a learning rate that makes the weight updates about
# 1e-3 times the weights (to within an order of magnitude)
eta = 0.005
# the amount of steps was set by trial and error
nsteps = 300000

# define the validation set to be used in training_visualization
validation_set = gas.generate(amount=20)

# Init the boltzmann machine and train it while visualizing the suggested plots
training_set = gas.generate(amount=training_size, n_max=n_max)
m = bm.BoltzmannMachine(num_hidden=hidden_units)
a, b, w = m.train(training_set, batchsize=batchsize, eta=eta, nsteps=nsteps, do_while_training=None)

# Store in a file
run_id = int(datetime.datetime.now().timestamp())
np.savetxt(f"a_{run_id}.csv", a, delimiter=',')
np.savetxt(f"b_{run_id}.csv", b, delimiter=',')
np.savetxt(f"w_{run_id}.csv", w, delimiter=',')
40.333333
104
0.69146
d4c7b73306f8c0594f64a791f8292624d0ac8d82
11,237
py
Python
Tests/Marketplace/prepare_public_index_for_private_testing.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
799
2016-08-02T06:43:14.000Z
2022-03-31T11:10:11.000Z
Tests/Marketplace/prepare_public_index_for_private_testing.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
9,317
2016-08-07T19:00:51.000Z
2022-03-31T21:56:04.000Z
Tests/Marketplace/prepare_public_index_for_private_testing.py
diCagri/content
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
[ "MIT" ]
1,297
2016-08-04T13:59:00.000Z
2022-03-31T23:43:06.000Z
import time
import os
import sys
import shutil
import json
import argparse
from zipfile import ZipFile
from contextlib import contextmanager
from datetime import datetime

from Tests.private_build.upload_packs_private import download_and_extract_index, update_index_with_priced_packs, \
    extract_packs_artifacts
from Tests.Marketplace.marketplace_services import init_storage_client
from Tests.scripts.utils.log_util import install_logging
from Tests.scripts.utils import logging_wrapper as logging

MAX_SECONDS_TO_WAIT_FOR_LOCK = 600
LOCK_FILE_PATH = 'lock.txt'


def upload_modified_index(public_index_folder_path, extract_destination_path, public_ci_dummy_index_blob, build_number,
                          private_packs):
    """Upload updated index zip to cloud storage.

    Args:
        public_index_folder_path (str): public index folder full path.
        extract_destination_path (str): extract folder full path.
        public_ci_dummy_index_blob (Blob): google cloud storage object that represents the dummy index.zip blob.
        build_number (str): circleCI build number, used as an index revision.
        private_packs (list): List of private packs and their price.
    """
    with open(os.path.join(public_index_folder_path, "index.json"), "w+") as index_file:
        for private_pack in private_packs:
            private_pack['price'] = 0
        index = {
            'revision': build_number,
            'modified': datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
            'packs': private_packs
        }
        json.dump(index, index_file, indent=4)

    index_zip_name = os.path.basename(public_index_folder_path)
    index_zip_path = shutil.make_archive(base_name=public_index_folder_path, format="zip",
                                         root_dir=extract_destination_path, base_dir=index_zip_name)
    try:
        public_ci_dummy_index_blob.reload()
        public_ci_dummy_index_blob.cache_control = "no-cache,max-age=0"  # disabling caching for index blob
        public_ci_dummy_index_blob.upload_from_filename(index_zip_path)
        logging.success("Finished uploading index.zip to storage.")
    except Exception:
        logging.exception("Failed in uploading index. Mismatch in index file generation.")
        sys.exit(1)
    finally:
        shutil.rmtree(public_index_folder_path)


def option_handler():
    """Validates and parses script arguments.

    Returns:
        Namespace: Parsed arguments object.
    """
    parser = argparse.ArgumentParser(description="Store packs in cloud storage.")
    # disable-secrets-detection-start
    parser.add_argument('-b', '--public_bucket_name', help="CI public bucket name", required=True)
    parser.add_argument('-pb', '--private_bucket_name', help="CI private bucket name", required=True)
    parser.add_argument('-s', '--service_account',
                        help=("Path to gcloud service account, is for circleCI usage. "
                              "For local development use your personal account and "
                              "authenticate using Google Cloud SDK by running: "
                              "`gcloud auth application-default login` and leave this parameter blank. "
                              "For more information go to: "
                              "https://googleapis.dev/python/google-api-core/latest/auth.html"),
                        required=False)
    parser.add_argument('-n', '--ci_build_number',
                        help="CircleCi build number (will be used as hash revision at index file)", required=True)
    parser.add_argument('-e', '--extract_public_index_path',
                        help="Full path of folder to extract the public index", required=True)
    parser.add_argument('-sb', '--storage_base_path',
                        help="Storage base path of the directory to upload to.", required=False)
    parser.add_argument('-p', '--pack_name', help="Modified pack to upload to gcs.")
    parser.add_argument('-a', '--artifacts_path', help="The full path of packs artifacts", required=True)
    parser.add_argument('-ea', '--extract_artifacts_path',
                        help="Full path of folder to extract wanted packs", required=True)
    parser.add_argument('-di', '--dummy_index_dir_path',
                        help="Full path to the dummy index in the private CI bucket", required=True)
    # disable-secrets-detection-end
    return parser.parse_args()


if __name__ == '__main__':
    main()
48.021368
129
0.707128
d4c9b736f8e2520a3fae30db6df87b55b43b886b
106
py
Python
ARMODServers/Apps/ARExperiences/apps.py
Phantomxm2021/ARMOD-Dashboard
383cf0a5e72dc5a2651f43e693f06773d5b88bbd
[ "Apache-2.0" ]
1
2021-11-04T09:03:27.000Z
2021-11-04T09:03:27.000Z
ARMODServers/Apps/ARExperiences/apps.py
Phantomxm2021/ARMOD-Dashboard
383cf0a5e72dc5a2651f43e693f06773d5b88bbd
[ "Apache-2.0" ]
null
null
null
ARMODServers/Apps/ARExperiences/apps.py
Phantomxm2021/ARMOD-Dashboard
383cf0a5e72dc5a2651f43e693f06773d5b88bbd
[ "Apache-2.0" ]
null
null
null
from django.apps import AppConfig
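The rest of this 106-byte file did not survive extraction. A Django apps.py of this shape would normally close with an AppConfig subclass; the class name and label below are hypothetical reconstructions from the file path:

class ArexperiencesConfig(AppConfig):  # hypothetical name
    name = 'Apps.ARExperiences'  # inferred from the repo path, not confirmed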
17.666667
37
0.783019
d4c9cb6d342d54eea3d53d2a8f44856dc1296577
2,843
py
Python
configs/_base_/datasets/flyingchairs_320x448.py
zhouzaida/mmflow
b34f0801061469f04a83133d7f5652dead1f93ce
[ "Apache-2.0" ]
1
2021-11-16T12:32:54.000Z
2021-11-16T12:32:54.000Z
configs/_base_/datasets/flyingchairs_320x448.py
xiaokekeke/mmflow
c9ab798cec832d3472cbb06f04b2d64299802168
[ "Apache-2.0" ]
null
null
null
configs/_base_/datasets/flyingchairs_320x448.py
xiaokekeke/mmflow
c9ab798cec832d3472cbb06f04b2d64299802168
[ "Apache-2.0" ]
1
2022-03-24T06:46:05.000Z
2022-03-24T06:46:05.000Z
dataset_type = 'FlyingChairs'
data_root = 'data/FlyingChairs_release'

img_norm_cfg = dict(mean=[0., 0., 0.], std=[255., 255., 255.], to_rgb=False)

global_transform = dict(
    translates=(0.05, 0.05),
    zoom=(1.0, 1.5),
    shear=(0.86, 1.16),
    rotate=(-10., 10.))

relative_transform = dict(
    translates=(0.00375, 0.00375),
    zoom=(0.985, 1.015),
    shear=(1.0, 1.0),
    rotate=(-1.0, 1.0))

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(
        type='ColorJitter',
        brightness=0.5,
        contrast=0.5,
        saturation=0.5,
        hue=0.5),
    dict(type='RandomGamma', gamma_range=(0.7, 1.5)),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='GaussianNoise', sigma_range=(0, 0.04), clamp_range=(0., 1.)),
    dict(type='RandomFlip', prob=0.5, direction='horizontal'),
    dict(type='RandomFlip', prob=0.5, direction='vertical'),
    dict(
        type='RandomAffine',
        global_transform=global_transform,
        relative_transform=relative_transform),
    dict(type='RandomCrop', crop_size=(320, 448)),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['imgs', 'flow_gt'],
        meta_keys=[
            'img_fields', 'ann_fields', 'filename1', 'filename2',
            'ori_filename1', 'ori_filename2', 'filename_flow',
            'ori_filename_flow', 'ori_shape', 'img_shape', 'img_norm_cfg'
        ]),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations'),
    dict(type='InputResize', exponent=6),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='TestFormatBundle'),
    dict(
        type='Collect',
        keys=['imgs'],
        meta_keys=[
            'flow_gt', 'filename1', 'filename2', 'ori_filename1',
            'ori_filename2', 'ori_shape', 'img_shape', 'img_norm_cfg',
            'scale_factor', 'pad_shape'
        ])
]

flyingchairs_train = dict(
    type=dataset_type,
    pipeline=train_pipeline,
    data_root=data_root,
    split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt')

data = dict(
    train_dataloader=dict(
        samples_per_gpu=1,
        workers_per_gpu=2,
        drop_last=True,
        persistent_workers=True),
    val_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
    test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=2, shuffle=False),
    train=flyingchairs_train,
    val=dict(
        type=dataset_type,
        pipeline=test_pipeline,
        data_root=data_root,
        test_mode=True,
        split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'),
    test=dict(
        type=dataset_type,
        pipeline=test_pipeline,
        data_root=data_root,
        test_mode=True,
        split_file='data/FlyingChairs_release/FlyingChairs_train_val.txt'))
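Configs like this are plain Python files consumed by the MMCV config loader; a quick way to inspect the resolved structure, assuming an MMCV installation of that era:

from mmcv import Config

cfg = Config.fromfile('configs/_base_/datasets/flyingchairs_320x448.py')
print(cfg.data.train_dataloader)  # dict(samples_per_gpu=1, workers_per_gpu=2, ...)
print(cfg.train_pipeline[0])      # dict(type='LoadImageFromFile')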
31.241758
78
0.631375
d4ca74c07139ca34712a0c4f0276402a1f20a541
23,370
py
Python
plaidml2/edsl/__init__.py
ZhouXiaolin/plaidml
dac460b6ae19a62299d15eeb17b402d8c26d0c2b
[ "Apache-2.0" ]
4,535
2017-10-20T05:03:57.000Z
2022-03-30T15:42:33.000Z
plaidml2/edsl/__init__.py
ZhouXiaolin/plaidml
dac460b6ae19a62299d15eeb17b402d8c26d0c2b
[ "Apache-2.0" ]
984
2017-10-20T17:16:09.000Z
2022-03-30T05:43:18.000Z
plaidml2/edsl/__init__.py
ZhouXiaolin/plaidml
dac460b6ae19a62299d15eeb17b402d8c26d0c2b
[ "Apache-2.0" ]
492
2017-10-20T18:22:32.000Z
2022-03-30T09:00:05.000Z
# Copyright 2019 Intel Corporation.

import logging
from collections import namedtuple

import numpy as np
import six

from plaidml2 import DType
from plaidml2.core import TensorShape, Buffer
from plaidml2.ffi import ForeignObject, ffi, ffi_call, lib

logger = logging.getLogger(__name__)


def __init():
    """Docstring for function plaidml2.edsl.__init"""
    ffi_call(lib.plaidml_edsl_init)


ffi.init_once(__init, 'plaidml_edsl_init')

Constraint = namedtuple('Constraint', ['lhs', 'rhs'])
_ContractionPart = namedtuple('_ContractionPart', ['op', 'args'])


# bind a concrete shape to this tensor
def bind(self, shape):
    ffi_call(lib.plaidml_expr_bind_shape, self.as_ptr(), shape.as_ptr())


class TensorRef:
    """Docstring for class TensorRef"""


def wrap_tensor(x):
    if isinstance(x, six.integer_types):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x))
    if np.issubdtype(type(x), np.integer):
        return Tensor(expr=ffi_call(lib.plaidml_expr_int, x.item()))
    if isinstance(x, float):
        return Tensor(expr=ffi_call(lib.plaidml_expr_float, x))
    if isinstance(x, TensorDim):
        return Tensor(expr=ffi_call(lib.plaidml_expr_dim, x.as_ptr()))
    if isinstance(x, Tensor):
        return x
    raise TypeError('Unexpected type for call argument: {}. fn: {}, args: {}, bad arg: {}'.format(
        type(x), fn, args, x))


def call(fn, *args):
    args = [wrap_tensor(x) for x in args]
    raw_args = [x.as_ptr() for x in args]
    return Tensor(expr=ffi_call(lib.plaidml_expr_call, fn.encode(), len(args), raw_args))


def cast(x, dtype):
    return Tensor(expr=ffi_call(lib.plaidml_expr_cast, wrap_tensor(x).as_ptr(), dtype))


def as_bool(x):
    return cast(x, DType.BOOLEAN)


def as_float(x, bit_size):
    map = {
        16: DType.FLOAT16,
        32: DType.FLOAT32,
        64: DType.FLOAT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_float')
    return cast(x, dtype)


def as_int(x, bit_size):
    map = {
        8: DType.INT8,
        16: DType.INT16,
        32: DType.INT32,
        64: DType.INT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_int')
    return cast(x, dtype)


def as_uint(x, bit_size):
    map = {
        8: DType.UINT8,
        16: DType.UINT16,
        32: DType.UINT32,
        64: DType.UINT64,
    }
    dtype = map.get(bit_size)
    if not dtype:
        raise ValueError('Unsupported bit_size for as_uint')
    return cast(x, dtype)


def ceil(x):
    return call('ceil', x)


def cond(lhs, rhs, true_case):
    return IndexedTensor(_ContractionPart(lib.PLAIDML_COMBO_OP_COND, (lhs, rhs, true_case)))


def cos(x):
    return call('cos', x)


def exp(x):
    return call('exp', x)


def floor(x):
    return call('floor', x)


def gather(x, y):
    return call('gather', x, y)


def gradients(loss, variables):
    wrts = [x.as_ptr() for x in variables]
    raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
    ffi_call(
        lib.plaidml_expr_gradient,
        len(wrts),
        wrts,
        loss.as_ptr(),
        raw_grads,
    )
    return [Tensor(expr=x) for x in raw_grads]


def ident(x):
    return call('ident', x)


def index(x, axis):
    return call('index', x, axis)


def jacobian(loss, variables):
    wrts = [x.as_ptr() for x in variables]
    raw_grads = ffi.new('plaidml_expr*[]', len(wrts))
    ffi_call(
        lib.plaidml_expr_jacobian,
        len(wrts),
        wrts,
        loss.as_ptr(),
        raw_grads,
    )
    return [Tensor(expr=x) for x in raw_grads]


def log(x):
    return call('log', x)


def max(x, y):
    return call('max', x, y)


def min(x, y):
    return call('min', x, y)


def pow(x, y):
    return call('pow', x, y)


def prng(state, shape):
    return call('prng', state, *shape)


def reshape(x, dims):
    return call('reshape', x, *dims)


def round(x):
    return call('round', x)


def scatter(x, y, z):
    return call('scatter', x, y, z)


def select(cond, true_case, false_case):
    return call('cond', cond, true_case, false_case)


def shape(x):
    return call('shape', x)


def sin(x):
    return call('sin', x)


def sqrt(x):
    return call('sqrt', x)


def tan(x):
    return call('tan', x)


def tanh(x):
    return call('tanh', x)
29.2125
98
0.631365
d4cc2ada6fd8bd17a6303118a58e9c1a8c44ff7a
2,265
py
Python
pytorch_toolkit/face_recognition/model/common.py
AnastasiaaSenina/openvino_training_extensions
267425d64372dff5b9083dc0ca6abfc305a71449
[ "Apache-2.0" ]
1
2020-02-09T15:50:49.000Z
2020-02-09T15:50:49.000Z
pytorch_toolkit/face_recognition/model/common.py
akshayjaryal603/openvino_training_extensions
7d606a22143db0af97087709d63a2ec2aa02036c
[ "Apache-2.0" ]
28
2020-09-25T22:40:36.000Z
2022-03-12T00:37:36.000Z
pytorch_toolkit/face_recognition/model/common.py
akshayjaryal603/openvino_training_extensions
7d606a22143db0af97087709d63a2ec2aa02036c
[ "Apache-2.0" ]
1
2021-04-02T07:51:01.000Z
2021-04-02T07:51:01.000Z
""" Copyright (c) 2018 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from abc import abstractmethod from functools import partial import torch.nn as nn from .rmnet_angular import RMNetAngular from .mobilefacenet import MobileFaceNet from .landnet import LandmarksNet from .se_resnet_angular import SEResNetAngular from .shufflenet_v2_angular import ShuffleNetV2Angular from .backbones.se_resnet import se_resnet50, se_resnet101, se_resnet152 from .backbones.resnet import resnet50 from .backbones.se_resnext import se_resnext50, se_resnext101, se_resnext152 models_backbones = {'rmnet': RMNetAngular, 'mobilenetv2': MobileFaceNet, 'mobilenetv2_2x': partial(MobileFaceNet, width_multiplier=2.0), 'mobilenetv2_1_5x': partial(MobileFaceNet, width_multiplier=1.5), 'resnet50': partial(SEResNetAngular, base=resnet50), 'se_resnet50': partial(SEResNetAngular, base=se_resnet50), 'se_resnet101': partial(SEResNetAngular, base=se_resnet101), 'se_resnet152': partial(SEResNetAngular, base=se_resnet152), 'se_resnext50': partial(SEResNetAngular, base=se_resnext50), 'se_resnext101': partial(SEResNetAngular, base=se_resnext101), 'se_resnext152': partial(SEResNetAngular, base=se_resnext152), 'shufflenetv2': ShuffleNetV2Angular} models_landmarks = {'landnet': LandmarksNet}
41.944444
85
0.714349
d4cd43090d9af44b579f4587a49e6d83acfe093a
807
py
Python
src/dataclay/util/logs.py
kpavel/pyclay
275bc8af5c57301231a20cca1cc88556a9c84c79
[ "BSD-3-Clause" ]
1
2020-04-16T17:09:15.000Z
2020-04-16T17:09:15.000Z
src/dataclay/util/logs.py
kpavel/pyclay
275bc8af5c57301231a20cca1cc88556a9c84c79
[ "BSD-3-Clause" ]
35
2019-11-06T17:06:16.000Z
2021-04-12T16:27:20.000Z
src/dataclay/util/logs.py
kpavel/pyclay
275bc8af5c57301231a20cca1cc88556a9c84c79
[ "BSD-3-Clause" ]
1
2020-05-06T11:28:16.000Z
2020-05-06T11:28:16.000Z
""" Class description goes here. """ import json import logging
26.032258
70
0.581165
d4cd4596ad7f6e0187f91e645753c131d68a9a4a
845
py
Python
python/orthogonal_test.py
davxy/numeric
1e8b44a72e1d570433a5ba81ae0795a750ce5921
[ "Unlicense" ]
2
2020-05-03T17:02:44.000Z
2022-02-21T04:09:34.000Z
python/orthogonal_test.py
davxy/numeric
1e8b44a72e1d570433a5ba81ae0795a750ce5921
[ "Unlicense" ]
null
null
null
python/orthogonal_test.py
davxy/numeric
1e8b44a72e1d570433a5ba81ae0795a750ce5921
[ "Unlicense" ]
null
null
null
# Orthogonal linear system solver tests
from math import sqrt
import numpy as np
from orthogonal import orthogonal

################################################################################
# 2x2 orthogonal matrix
A = np.matrix('1 1;'
              '1 -1', float)
A = A*1.0/sqrt(2.0)
# Known terms vector
b = np.matrix('2; 3')
# Solve the system
x = orthogonal(A, b, 1)
# Check
if not np.allclose(b, A*x):
    raise Exception('Orthogonal test failure')

################################################################################
# 3x3 orthogonal matrix
A = np.matrix('2 -2 1;'
              '1 2 2;'
              '2 1 -2', float)
A = A*1.0/3.0
# Known terms vector
b = np.matrix('2; 3; 4')
# Solve the system
x = orthogonal(A, b)
# Check
if not np.allclose(b, A*x):
    raise Exception('Orthogonal test failure')
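The orthogonal module under test is not included in this dump. For an orthogonal matrix A, the inverse equals the transpose, so solving Ax = b collapses to a transpose-multiply; a minimal stand-in (an assumption, not the tested implementation) is:

def orthogonal_solve(A, b):
    # valid because A.T * A == I when A is orthogonal, hence x = A^T b
    return A.T * b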
24.142857
80
0.498225
d4cecc18d5f88370e565ff6b3803a9cfe92f4765
11,056
py
Python
src/autonlp/project.py
adbmd/autonlp
8f7b5559d88775850b6818a09f178dc3407b2ab8
[ "Apache-2.0" ]
1
2021-03-08T17:47:18.000Z
2021-03-08T17:47:18.000Z
src/autonlp/project.py
adbmd/autonlp
8f7b5559d88775850b6818a09f178dc3407b2ab8
[ "Apache-2.0" ]
null
null
null
src/autonlp/project.py
adbmd/autonlp
8f7b5559d88775850b6818a09f178dc3407b2ab8
[ "Apache-2.0" ]
null
null
null
import os
import shutil
from dataclasses import dataclass
from datetime import datetime
from typing import Dict, List, Optional

from huggingface_hub import Repository
from loguru import logger
from prettytable import PrettyTable

from .splits import TEST_SPLIT, TRAIN_SPLIT, VALID_SPLIT
from .tasks import TASKS
from .utils import BOLD_TAG, CYAN_TAG, GREEN_TAG, PURPLE_TAG, RESET_TAG, YELLOW_TAG, http_get, http_post
from .validation import validate_file

FILE_STATUS = (
    " Uploaded",
    " Queued",
    " In Progress...",
    " Success!",
    " Failed: file not found",
    " Failed: unsupported file type",
    " Failed: server error",
    " Invalid column mapping, please fix it and re-upload the file.",
)

JOB_STATUS = (
    ("", "queued"),
    ("", "start"),
    ("", "data_munging"),
    ("", "model_training"),
    ("", "success"),
    ("", "failed"),
)

PROJECT_STATUS = (
    ("", "Created"),
    ("", "Data processing started"),
    ("", "Data processing successful"),
    ("", "Failed to download data files from the huggingface hub"),
    ("", "Missing 'train' or 'valid' split in data files"),
    ("", "Failed to process data files"),
    ("", "Failed to upload processed data files to the huggingface hub"),
)

SPLITS = (TRAIN_SPLIT, VALID_SPLIT, TEST_SPLIT)
38.256055
117
0.565213
d4cf41c3907f30d0f8d4b3c715caa3ef127581dc
5,353
py
Python
backend/services/apns_util.py
xuantan/viewfinder
992209086d01be0ef6506f325cf89b84d374f969
[ "Apache-2.0" ]
645
2015-01-03T02:03:59.000Z
2021-12-03T08:43:16.000Z
backend/services/apns_util.py
hoowang/viewfinder
9caf4e75faa8070d85f605c91d4cfb52c4674588
[ "Apache-2.0" ]
null
null
null
backend/services/apns_util.py
hoowang/viewfinder
9caf4e75faa8070d85f605c91d4cfb52c4674588
[ "Apache-2.0" ]
222
2015-01-07T05:00:52.000Z
2021-12-06T09:54:26.000Z
# -*- coding: utf-8 -*-
# Copyright 2012 Viewfinder Inc. All Rights Reserved.

"""Apple Push Notification service utilities.

Original copyright for this code: https://github.com/jayridge/apnstornado

  TokenToBinary(): converts a hex-encoded token into a binary value
  CreateMessage(): formats a binary APNs message from parameters
  ParseResponse(): parses APNs binary response for status & identifier
  ErrorStatusToString(): converts error status to error message
"""

__author__ = '[email protected] (Spencer Kimball)'

import base64
import json
import struct
import time

from tornado import escape

_MAX_PAYLOAD_BYTES = 256
"""Maximum number of bytes in the APNS payload."""

_ELLIPSIS_BYTES = escape.utf8(u'…')
"""UTF-8 encoding of the Unicode ellipsis character."""


def _TruncateAlert(alert, max_bytes):
    """Converts the alert text to UTF-8 encoded JSON format, which is how
    the alert will be stored in the APNS payload. If the number of
    resulting bytes exceeds "max_bytes", then truncates the alert text
    at a Unicode character boundary, taking care not to split JSON
    escape sequences. Returns the truncated UTF-8 encoded alert text,
    including a trailing ellipsis character.
    """
    alert_json = escape.utf8(json.dumps(escape.recursive_unicode(alert), ensure_ascii=False))

    # Strip quotes added by JSON.
    alert_json = alert_json[1:-1]

    # Check if alert fits with no truncation.
    if len(alert_json) <= max_bytes:
        return escape.utf8(alert)

    # Make room for an appended ellipsis.
    assert max_bytes >= len(_ELLIPSIS_BYTES), 'max_bytes must be at least %d' % len(_ELLIPSIS_BYTES)
    max_bytes -= len(_ELLIPSIS_BYTES)

    # Truncate the JSON UTF8 string at a Unicode character boundary.
    truncated = alert_json[:max_bytes].decode('utf-8', errors='ignore')

    # If JSON escape sequences were split, then the truncated string may not be valid JSON. Keep
    # chopping trailing characters until the truncated string is valid JSON. It may take several
    # tries, such as in the case where a "\u1234" sequence has been split.
    while True:
        try:
            alert = json.loads(u'"%s"' % truncated)
            break
        except Exception:
            truncated = truncated[:-1]

    # Return the UTF-8 encoding of the alert with the ellipsis appended to it.
    return escape.utf8(alert) + _ELLIPSIS_BYTES
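Of the helpers listed in the module docstring, only _TruncateAlert survives in this dump. TokenToBinary is fully specified by its one-line description ("converts a hex-encoded token into a binary value"), so a faithful sketch is straightforward; the others would need the wire format and are left as the gap they are:

import binascii

def TokenToBinary(token):
    # hex-encoded APNs device token -> raw bytes (reconstruction, not the original body)
    return binascii.unhexlify(token)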
34.75974
110
0.713992
d4d089a89ed2ccdb81f62b6a9415dbcedcf723fa
25,485
py
Python
demonstrations/tutorial_kernels_module.py
jamesellis1999/qml
33c9d66712b36861dc098f9c789ba2c3ab897fdb
[ "Apache-2.0" ]
216
2020-08-01T03:18:37.000Z
2022-03-25T06:17:52.000Z
demonstrations/tutorial_kernels_module.py
jamesellis1999/qml
33c9d66712b36861dc098f9c789ba2c3ab897fdb
[ "Apache-2.0" ]
173
2020-08-05T09:24:15.000Z
2022-03-30T13:37:05.000Z
demonstrations/tutorial_kernels_module.py
jamesellis1999/qml
33c9d66712b36861dc098f9c789ba2c3ab897fdb
[ "Apache-2.0" ]
66
2020-08-01T05:02:45.000Z
2022-03-02T19:34:54.000Z
r"""Training and evaluating quantum kernels =========================================== .. meta:: :property="og:description": Kernels and alignment training with Pennylane. :property="og:image": https://pennylane.ai/qml/_images/QEK_thumbnail.png .. related:: tutorial_kernel_based_training Kernel-based training with scikit-learn tutorial_data_reuploading_classifier Classification with data reuploading *Authors: Peter-Jan Derks, Paul Fhrmann, Elies Gil-Fuster, Tom Hubregtsen, Johannes Jakob Meyer and David Wierichs. Posted: 24 June 2021* Kernel methods are one of the cornerstones of classical machine learning. Here we are concerned with kernels that can be evaluated on quantum computers, *quantum kernels* for short. In this tutorial you will learn how to evaluate kernels, use them for classification and train them with gradient-based optimization, and all that using the functionality of PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__. The demo is based on Ref. [#Training_QEKs]_, a project from Xanadu's own `QHack <https://qhack.ai/>`__ hackathon. What are kernel methods? ------------------------ To understand what a kernel method does, let's first revisit one of the simplest methods to assign binary labels to datapoints: linear classification. Imagine we want to discern two different classes of points that lie in different corners of the plane. A linear classifier corresponds to drawing a line and assigning different labels to the regions on opposing sides of the line: .. figure:: ../demonstrations/kernels_module/linear_classification.png :align: center :width: 30% We can mathematically formalize this by assigning the label :math:`y` via .. math:: y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \boldsymbol{x}\rangle + b). The vector :math:`\boldsymbol{w}` points perpendicular to the line and thus determine its slope. The independent term :math:`b` specifies the position on the plane. In this form, linear classification can also be extended to higher dimensional vectors :math:`\boldsymbol{x}`, where a line does not divide the entire space into two regions anymore. Instead one needs a *hyperplane*. It is immediately clear that this method is not very powerful, as datasets that are not separable by a hyperplane can't be classified without error. We can actually sneak around this limitation by performing a neat trick: if we define some map :math:`\phi(\boldsymbol{x})` that *embeds* our datapoints into a larger *feature space* and then perform linear classification there, we could actually realise non-linear classification in our original space! .. figure:: ../demonstrations/kernels_module/embedding_nonlinear_classification.png :align: center :width: 65% If we go back to the expression for our prediction and include the embedding, we get .. math:: y(\boldsymbol{x}) = \operatorname{sgn}(\langle \boldsymbol{w}, \phi(\boldsymbol{x})\rangle + b). We will forgo one tiny step, but it can be shown that for the purpose of optimal classification, we can choose the vector defining the decision boundary as a linear combination of the embedded datapoints :math:`\boldsymbol{w} = \sum_i \alpha_i \phi(\boldsymbol{x}_i)`. Putting this into the formula yields .. math:: y(\boldsymbol{x}) = \operatorname{sgn}\left(\sum_i \alpha_i \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x})\rangle + b\right). This rewriting might not seem useful at first, but notice the above formula only contains inner products between vectors in the embedding space: .. 
math:: k(\boldsymbol{x}_i, \boldsymbol{x}_j) = \langle \phi(\boldsymbol{x}_i), \phi(\boldsymbol{x}_j)\rangle. We call this function the *kernel*. It provides the advantage that we can often find an explicit formula for the kernel :math:`k` that makes it superfluous to actually perform the (potentially expensive) embedding :math:`\phi`. Consider for example the following embedding and the associated kernel: .. math:: \phi((x_1, x_2)) &= (x_1^2, \sqrt{2} x_1 x_2, x_2^2) \\ k(\boldsymbol{x}, \boldsymbol{y}) &= x_1^2 y_1^2 + 2 x_1 x_2 y_1 y_2 + x_2^2 y_2^2 = \langle \boldsymbol{x}, \boldsymbol{y} \rangle^2. This means by just replacing the regular scalar product in our linear classification with the map :math:`k`, we can actually express much more intricate decision boundaries! This is very important, because in many interesting cases the embedding :math:`\phi` will be much costlier to compute than the kernel :math:`k`. In this demo, we will explore one particular kind of kernel that can be realized on near-term quantum computers, namely *Quantum Embedding Kernels (QEKs)*. These are kernels that arise from embedding data into the space of quantum states. We formalize this by considering a parameterised quantum circuit :math:`U(\boldsymbol{x})` that maps a datapoint :math:`\boldsymbol{x}` to the state .. math:: |\psi(\boldsymbol{x})\rangle = U(\boldsymbol{x}) |0 \rangle. The kernel value is then given by the *overlap* of the associated embedded quantum states .. math:: k(\boldsymbol{x}_i, \boldsymbol{x}_j) = | \langle\psi(\boldsymbol{x}_i)|\psi(\boldsymbol{x}_j)\rangle|^2. """ ############################################################################## # A toy problem # ------------- # In this demo, we will treat a toy problem that showcases the # inner workings of classification with quantum embedding kernels, # training variational embedding kernels and the available functionalities # to do both in PennyLane. We of course need to start with some imports: from pennylane import numpy as np import matplotlib as mpl np.random.seed(1359) ############################################################################## # And we proceed right away to create a dataset to work with, the # ``DoubleCake`` dataset. Firstly, we define two functions to enable us to # generate the data. # The details of these functions are not essential for understanding the demo, # so don't mind them if they are confusing. 
def _make_circular_data(num_sectors):
    """Generate datapoints arranged in an even circle."""
    center_indices = np.array(range(0, num_sectors))
    sector_angle = 2 * np.pi / num_sectors
    angles = (center_indices + 0.5) * sector_angle
    x = 0.7 * np.cos(angles)
    y = 0.7 * np.sin(angles)
    labels = 2 * np.remainder(np.floor_divide(angles, sector_angle), 2) - 1

    return x, y, labels


##############################################################################
# Next, we define a function to help plot the ``DoubleCake`` data:


def plot_double_cake_data(X, Y, ax, num_sectors=None):
    """Plot double cake data and corresponding sectors."""
    x, y = X.T
    cmap = mpl.colors.ListedColormap(["#FF0000", "#0000FF"])
    ax.scatter(x, y, c=Y, cmap=cmap, s=25, marker="s")

    if num_sectors is not None:
        sector_angle = 360 / num_sectors
        for i in range(num_sectors):
            color = ["#FF0000", "#0000FF"][(i % 2)]
            other_color = ["#FF0000", "#0000FF"][((i + 1) % 2)]
            ax.add_artist(
                mpl.patches.Wedge(
                    (0, 0),
                    1,
                    i * sector_angle,
                    (i + 1) * sector_angle,
                    lw=0,
                    color=color,
                    alpha=0.1,
                    width=0.5,
                )
            )
            ax.add_artist(
                mpl.patches.Wedge(
                    (0, 0),
                    0.5,
                    i * sector_angle,
                    (i + 1) * sector_angle,
                    lw=0,
                    color=other_color,
                    alpha=0.1,
                )
            )

    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    ax.set_aspect("equal")
    ax.axis("off")

    return ax


##############################################################################
# Let's now have a look at our dataset. In our example, we will work with
# 3 sectors:

import matplotlib.pyplot as plt

num_sectors = 3
X, Y = make_double_cake_data(num_sectors)

ax = plot_double_cake_data(X, Y, plt.gca(), num_sectors=num_sectors)

##############################################################################
# Defining a Quantum Embedding Kernel
# -----------------------------------
# PennyLane's `kernels module <https://pennylane.readthedocs.io/en/latest/code/qml_kernels.html>`__
# allows for a particularly simple
# implementation of Quantum Embedding Kernels. The first ingredient we
# need for this is an *ansatz*, which we will construct by repeating a
# layer as building block. Let's start by defining this layer:

import pennylane as qml


def layer(x, params, wires, i0=0, inc=1):
    """Building block of the embedding ansatz"""
    i = i0
    for j, wire in enumerate(wires):
        qml.Hadamard(wires=[wire])
        qml.RZ(x[i % len(x)], wires=[wire])
        i += inc
        qml.RY(params[0, j], wires=[wire])

    qml.broadcast(unitary=qml.CRZ, pattern="ring", wires=wires, parameters=params[1])


##############################################################################
# To construct the ansatz, this layer is repeated multiple times, reusing
# the datapoint ``x`` but feeding different variational
# parameters ``params`` into each of them.
# Together, the datapoint and the variational parameters fully determine
# the embedding ansatz :math:`U(\boldsymbol{x})`.
# In order to construct the full kernel circuit, we also require its adjoint
# :math:`U(\boldsymbol{x})^\dagger`, which we can obtain via ``qml.adjoint``.


def ansatz(x, params, wires):
    """The embedding ansatz"""
    for j, layer_params in enumerate(params):
        layer(x, layer_params, wires, i0=j * len(wires))


adjoint_ansatz = qml.adjoint(ansatz)


def random_params(num_wires, num_layers):
    """Generate random variational parameters in the shape for the ansatz."""
    return np.random.uniform(0, 2 * np.pi, (num_layers, 2, num_wires), requires_grad=True)


##############################################################################
# Together with the ansatz we only need a device to run the quantum circuit on.
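Of the two data-generation functions the prose above promises, only _make_circular_data survives in this dump; make_double_cake_data is called above but never defined. A plausible reconstruction, assuming the inner circle is a scaled copy of the outer one with flipped labels (an assumption, not the author's exact code):

def make_double_cake_data(num_sectors):
    """Hypothetical reconstruction of the elided helper: stack an outer and a
    half-radius inner circle of datapoints with opposite labels."""
    x1, y1, labels1 = _make_circular_data(num_sectors)
    x2, y2, labels2 = _make_circular_data(num_sectors)

    # inner circle at half the radius, labels flipped relative to the outer one
    x = np.hstack([x1, 0.5 * x2])
    y = np.hstack([y1, 0.5 * y2])
    labels = np.hstack([labels1, -1 * labels2])

    X = np.vstack([x, y]).T
    return X, labels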
# For the purpose of this tutorial we will use PennyLane's ``default.qubit``
# device with 5 wires in analytic mode.

dev = qml.device("default.qubit", wires=5, shots=None)
wires = dev.wires.tolist()

##############################################################################
# Let us now define the quantum circuit that realizes the kernel. We will compute
# the overlap of the quantum states by first applying the embedding of the first
# datapoint and then the adjoint of the embedding of the second datapoint. We
# finally extract the probabilities of observing each basis state.

##############################################################################
# The kernel function itself is now obtained by looking at the probability
# of observing the all-zero state at the end of the kernel circuit -- because
# of the ordering in ``qml.probs``, this is the first entry:


def kernel(x1, x2, params):
    return kernel_circuit(x1, x2, params)[0]


##############################################################################
#
# .. note::
#     An alternative way to set up the kernel circuit in PennyLane would be
#     to use the observable type
#     `Projector <https://pennylane.readthedocs.io/en/latest/code/api/pennylane.Projector.html>`__.
#     This is shown in the
#     `demo on kernel-based training of quantum models <https://pennylane.ai/qml/demos/tutorial_kernel_based_training.html>`__,
#     where you will also find more background information on the kernel
#     circuit structure itself.
#
# Before focusing on the kernel values we have to provide values for the
# variational parameters. At this point we fix the number of layers in the
# ansatz circuit to :math:`6`.

init_params = random_params(num_wires=5, num_layers=6)

##############################################################################
# Now we can have a look at the kernel value between the first and the
# second datapoint:

kernel_value = kernel(X[0], X[1], init_params)
print(f"The kernel value between the first and second datapoint is {kernel_value:.3f}")

##############################################################################
# The mutual kernel values between all elements of the dataset form the
# *kernel matrix*. We can inspect it via the ``qml.kernels.square_kernel_matrix``
# method, which makes use of symmetry of the kernel,
# :math:`k(\boldsymbol{x}_i,\boldsymbol{x}_j) = k(\boldsymbol{x}_j, \boldsymbol{x}_i)`.
# In addition, the option ``assume_normalized_kernel=True`` ensures that we do not
# calculate the entries between the same datapoints, as we know them to be 1
# for our noiseless simulation. Overall this means that we compute
# :math:`\frac{1}{2}(N^2-N)` kernel values for :math:`N` datapoints.
# To include the variational parameters, we construct a ``lambda`` function that
# fixes them to the values we sampled above.

init_kernel = lambda x1, x2: kernel(x1, x2, init_params)
K_init = qml.kernels.square_kernel_matrix(X, init_kernel, assume_normalized_kernel=True)

with np.printoptions(precision=3, suppress=True):
    print(K_init)

##############################################################################
# Using the Quantum Embedding Kernel for predictions
# --------------------------------------------------
# The quantum kernel alone cannot be used to make predictions on a
# dataset, because it is essentially just a tool to measure the similarity
# between two datapoints. To perform an actual prediction we will make use
# of scikit-learn's Support Vector Classifier (SVC).
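Three helpers used in this part of the demo did not survive the flattening: the qnode behind kernel above, plus the accuracy and plot_decision_boundaries functions evaluated on the SVC below. Reconstructions matching the surrounding descriptions (embed the first datapoint, apply the adjoint embedding of the second, read out basis-state probabilities; accuracy as the fraction of correctly classified points; decision boundaries from classifier predictions on a grid) would look like the following, though the exact original formulations are assumptions:

@qml.qnode(dev)
def kernel_circuit(x1, x2, params):
    # embed the first datapoint, then un-embed the second one
    ansatz(x1, params, wires=wires)
    adjoint_ansatz(x2, params, wires=wires)
    # probability of each basis state; entry 0 is the all-zero state
    return qml.probs(wires=wires)


def accuracy(classifier, X, Y_target):
    """Fraction of datapoints that the classifier labels correctly."""
    return 1 - np.count_nonzero(classifier.predict(X) - Y_target) / len(Y_target)


def plot_decision_boundaries(classifier, ax, N_gridpoints=14):
    """Sketch of the elided plotting helper: evaluate the classifier on a grid
    and draw filled contours together with the data."""
    _xx, _yy = np.meshgrid(np.linspace(-1, 1, N_gridpoints), np.linspace(-1, 1, N_gridpoints))
    _zz = np.zeros_like(_xx)
    for idx in np.ndindex(*_xx.shape):
        _zz[idx] = classifier.predict(np.array([_xx[idx], _yy[idx]])[np.newaxis, :])

    plot_data = {"_xx": _xx, "_yy": _yy, "_zz": _zz}
    ax.contourf(_xx, _yy, _zz,
                cmap=mpl.colors.ListedColormap(["#FF0000", "#0000FF"]),
                alpha=0.2, levels=[-1, 0, 1])
    plot_double_cake_data(X, Y, ax)
    return plot_data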
from sklearn.svm import SVC

##############################################################################
# To construct the SVM, we need to supply ``sklearn.svm.SVC`` with a function
# that takes two sets of datapoints and returns the associated kernel matrix.
# We can make use of the function ``qml.kernels.kernel_matrix`` that provides
# this functionality. It expects the kernel to not have additional parameters
# besides the datapoints, which is why we again supply the variational
# parameters via the ``lambda`` function from above.
# Once we have this, we can let scikit-learn adjust the SVM from our Quantum
# Embedding Kernel.
#
# .. note::
#     This step does *not* modify the variational parameters in our circuit
#     ansatz. What it does is solving a different optimization task for the
#     :math:`\alpha` and :math:`b` vectors we introduced in the beginning.

svm = SVC(kernel=lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, init_kernel)).fit(X, Y)

##############################################################################
# To see how well our classifier performs we will measure which percentage
# of the dataset it classifies correctly.

accuracy_init = accuracy(svm, X, Y)
print(f"The accuracy of the kernel with random parameters is {accuracy_init:.3f}")

##############################################################################
# We are also interested in seeing what the decision boundaries in this
# classification look like. This could help us spot overfitting issues
# visually in more complex data sets. To this end we will introduce a
# second helper method.

##############################################################################
# With that done, let's have a look at the decision boundaries for our
# initial classifier:

init_plot_data = plot_decision_boundaries(svm, plt.gca())

##############################################################################
# We see the outer points in the dataset can be correctly classified, but
# we still struggle with the inner circle. But remember we have a circuit
# with many free parameters! It is reasonable to believe we can give
# values to those variational parameters which improve the overall accuracy
# of our SVC.
#
# Training the Quantum Embedding Kernel
# -------------------------------------
#
# To be able to train the Quantum Embedding Kernel we need some measure of
# how well it fits the dataset in question. Performing an exhaustive
# search in parameter space is not a good solution because it is very
# resource intensive, and since the accuracy is a discrete quantity we
# would not be able to detect small improvements.
#
# We can, however, resort to a more specialized measure, the
# *kernel-target alignment* [#Alignment]_. The kernel-target alignment compares the
# similarity predicted by the quantum kernel to the actual labels of the
# training data. It is based on *kernel alignment*, a similarity measure
# between two kernels with given kernel matrices :math:`K_1` and
# :math:`K_2`:
#
# .. math::
#     \operatorname{KA}(K_1, K_2) = \frac{\operatorname{Tr}(K_1 K_2)}{\sqrt{\operatorname{Tr}(K_1^2)\operatorname{Tr}(K_2^2)}}.
#
# .. note::
#     Seen from a more theoretical side, :math:`\operatorname{KA}`
#     is nothing else than the cosine of the angle between the kernel
#     matrices :math:`K_1` and :math:`K_2` if we see them as vectors
#     in the space of matrices with the Hilbert-Schmidt (or
#     Frobenius) scalar product
#     :math:`\langle A, B \rangle = \operatorname{Tr}(A^T B)`. This
#     reinforces the geometric picture of how this measure relates
#     to objects, namely two kernels, being aligned in a vector space.
#
# The training data enters the picture by defining an *ideal* kernel
# function that expresses the original labelling in the vector
# :math:`\boldsymbol{y}` by assigning to two datapoints the product
# of the corresponding labels:
#
# .. math::
#     k_{\boldsymbol{y}}(\boldsymbol{x}_i, \boldsymbol{x}_j) = y_i y_j.
#
# The assigned kernel is thus :math:`+1` if both datapoints lie in the
# same class and :math:`-1` otherwise and its kernel matrix is simply
# given by the outer product :math:`\boldsymbol{y}\boldsymbol{y}^T`.
# The kernel-target alignment is then defined as the kernel alignment
# of the kernel matrix :math:`K` generated by the
# quantum kernel and :math:`\boldsymbol{y}\boldsymbol{y}^T`:
#
# .. math::
#     \operatorname{KTA}_{\boldsymbol{y}}(K)
#     = \frac{\operatorname{Tr}(K \boldsymbol{y}\boldsymbol{y}^T)}{\sqrt{\operatorname{Tr}(K^2)\operatorname{Tr}((\boldsymbol{y}\boldsymbol{y}^T)^2)}}
#     = \frac{\boldsymbol{y}^T K \boldsymbol{y}}{\sqrt{\operatorname{Tr}(K^2)} N}
#
# where :math:`N` is the number of elements in :math:`\boldsymbol{y}`,
# that is the number of datapoints in the dataset.
#
# In summary, the kernel-target alignment effectively captures how well
# the kernel you chose reproduces the actual similarities of the data. It
# does have one drawback, however: having a high kernel-target alignment
# is only a necessary but not a sufficient condition for a good
# performance of the kernel [#Alignment]_. This means good alignment is
# necessary for good performance, but optimal alignment will not always
# bring optimal training accuracy with it.
#
# Let's now come back to the actual implementation. PennyLane's
# ``kernels`` module allows you to easily evaluate the kernel
# target alignment:

kta_init = qml.kernels.target_alignment(X, Y, init_kernel, assume_normalized_kernel=True)

print(f"The kernel-target alignment for our dataset and random parameters is {kta_init:.3f}")

##############################################################################
# Now let's code up an optimization loop and improve the kernel-target alignment!
#
# We will make use of regular gradient descent optimization. To speed up
# the optimization we will not use the entire training set to compute
# :math:`\operatorname{KTA}` but rather
# sample smaller subsets of the data at each step, we choose :math:`4`
# datapoints at random. Remember that PennyLane's built-in optimizer works
# to *minimize* the cost function that is given to it, which is why we
# have to multiply the kernel target alignment by :math:`-1` to actually
# *maximize* it in the process.
#
# .. note::
#     Currently, the function ``qml.kernels.target_alignment`` is not
#     differentiable yet, making it unfit for gradient descent optimization.
#     We therefore first define a differentiable version of this function.
def target_alignment(
    X,
    Y,
    kernel,
    assume_normalized_kernel=False,
    rescale_class_labels=True,
):
    """Kernel-target alignment between kernel and labels."""

    K = qml.kernels.square_kernel_matrix(
        X,
        kernel,
        assume_normalized_kernel=assume_normalized_kernel,
    )

    if rescale_class_labels:
        nplus = np.count_nonzero(np.array(Y) == 1)
        nminus = len(Y) - nplus
        _Y = np.array([y / nplus if y == 1 else y / nminus for y in Y])
    else:
        _Y = np.array(Y)

    T = np.outer(_Y, _Y)
    inner_product = np.sum(K * T)
    norm = np.sqrt(np.sum(K * K) * np.sum(T * T))
    inner_product = inner_product / norm

    return inner_product


params = init_params
opt = qml.GradientDescentOptimizer(0.2)

for i in range(500):
    # Choose subset of datapoints to compute the KTA on.
    subset = np.random.choice(list(range(len(X))), 4)
    # Define the cost function for optimization
    cost = lambda _params: -target_alignment(
        X[subset],
        Y[subset],
        lambda x1, x2: kernel(x1, x2, _params),
        assume_normalized_kernel=True,
    )
    # Optimization step
    params = opt.step(cost, params)

    # Report the alignment on the full dataset every 50 steps.
    if (i + 1) % 50 == 0:
        current_alignment = target_alignment(
            X,
            Y,
            lambda x1, x2: kernel(x1, x2, params),
            assume_normalized_kernel=True,
        )
        print(f"Step {i+1} - Alignment = {current_alignment:.3f}")

##############################################################################
# We want to assess the impact of training the parameters of the quantum
# kernel. Thus, let's build a second support vector classifier with the
# trained kernel:

# First create a kernel with the trained parameter baked into it.
trained_kernel = lambda x1, x2: kernel(x1, x2, params)

# Second create a kernel matrix function using the trained kernel.
trained_kernel_matrix = lambda X1, X2: qml.kernels.kernel_matrix(X1, X2, trained_kernel)

# Note that SVC expects the kernel argument to be a kernel matrix function.
svm_trained = SVC(kernel=trained_kernel_matrix).fit(X, Y)

##############################################################################
# We expect to see an accuracy improvement vs. the SVM with random
# parameters:

accuracy_trained = accuracy(svm_trained, X, Y)
print(f"The accuracy of a kernel with trained parameters is {accuracy_trained:.3f}")

##############################################################################
# We have now achieved perfect classification!
#
# Following on the results that SVMs have proven good generalisation
# behavior, it will be interesting to inspect the decision boundaries of
# our classifier:

trained_plot_data = plot_decision_boundaries(svm_trained, plt.gca())

##############################################################################
# Indeed, we see that now not only every data instance falls within the
# correct class, but also that there are no strong artifacts that would make us
# distrust the model. In this sense, our approach benefits from both: on
# one hand it can adjust itself to the dataset, and on the other hand
# is not expected to suffer from bad generalisation.
#
# References
# ----------
#
# .. [#Training_QEKs]
#
#     Thomas Hubregtsen, David Wierichs, Elies Gil-Fuster, Peter-Jan H. S. Derks,
#     Paul K. Faehrmann, and Johannes Jakob Meyer.
#     "Training Quantum Embedding Kernels on Near-Term Quantum Computers."
#     `arXiv:2105.02276 <https://arxiv.org/abs/2105.02276>`__, 2021.
#
# .. [#Alignment]
#
#     Wang, Tinghua, Dongyan Zhao, and Shengfeng Tian.
#     "An overview of kernel alignment and its applications."
#     `Artificial Intelligence Review 43.2: 179-192 <https://link.springer.com/article/10.1007/s10462-012-9369-4>`__, 2015.
40.645933
157
0.655994
d4d1efc02f1792aaf622052d335ddc24c16d8ad6
5,465
py
Python
main.py
scottkaz/PyLoopover
8f11f559c09747400fe6bb520ab521dbafa90e97
[ "MIT" ]
null
null
null
main.py
scottkaz/PyLoopover
8f11f559c09747400fe6bb520ab521dbafa90e97
[ "MIT" ]
null
null
null
main.py
scottkaz/PyLoopover
8f11f559c09747400fe6bb520ab521dbafa90e97
[ "MIT" ]
null
null
null
#!/usr/bin/python3
import pygame
import random
import time

##VARIABLES TO CHANGE
width = 500
height = 500
stats_height = 150
board_size = 5
window_name = "PyLoopover "+str(board_size)+"x"+str(board_size)
scramble_turns = 50
t_round = 3
FPS = 30

##DONT CHANGE THESE BOIS
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (32,200,32)
keys = {"w":0,"a":0,"s":0,"d":0,"q":0}
last_was_Q = False

def main():
    global last_was_Q  # assigned below; without this the module-level flag is shadowed
    gameboard = Board(board_size)
    pygame.init()
    pygame.mixer.quit() #weird workaroud
    #name the window & size it.
    pygame.display.set_caption(window_name)
    screen = pygame.display.set_mode((width,height+stats_height),0,32)
    #setup framerate
    pygame.time.set_timer(pygame.USEREVENT+1,int((1/FPS)*1000))
    #setup event que
    pygame.event.set_allowed(None) #start with no events allowed
    pygame.event.set_allowed(pygame.USEREVENT+1) #timer event
    pygame.event.set_allowed(pygame.KEYDOWN)
    pygame.event.set_allowed(pygame.QUIT) #4 quitters
    #setup fonts
    font = pygame.font.SysFont('mono',int((width/board_size)/1.14))
    font2 = pygame.font.SysFont('mono',int(stats_height/2.3))
    #main l00p
    running = True
    while running:
        #eevveeentttss???
        event = pygame.event.wait()
        if event.type == pygame.USEREVENT+1:
            #a fresh canvas
            screen.fill(WHITE)
            #draw stats
            time = gameboard.get_time()
            time_str = str( int( time[0] * (10 ** t_round) ) / (10 ** t_round) )
            text_timer = font2.render("Time :"+time_str,True,time[1])
            text_moves = font2.render("Moves:"+str(gameboard.moves),True,time[1])
            screen.blit(text_timer,(0,height))
            screen.blit(text_moves,(0,height+(stats_height/2)))
            #draw board
            gameboard.draw(screen,font)
            #update da screeeeeen
            pygame.display.update()
            #end the game
            if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
                gameboard.end_time()
        elif event.type == pygame.KEYDOWN:
            k = chr(event.key) #gimme a CHAR, not some weird integer
            domap = {
                "w":"gameboard.rotate_up(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
                "a":"gameboard.rotate_right(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
                "s":"gameboard.rotate_down(int(pygame.mouse.get_pos()[0]/(width/board_size)))",
                "d":"gameboard.rotate_left(int(pygame.mouse.get_pos()[1]/(height/board_size)))",
                "q":"gameboard.scramble(scramble_turns)"
            } #i guess?
            if k in ['w','a','s','d','q']:
                #starting game logic
                if k == "q":
                    last_was_Q = True
                else:
                    if last_was_Q:
                        gameboard.start_time()
                    last_was_Q = False
                exec(domap[k])
                #end the game
                if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
                    gameboard.end_time()
        #for quitters
        elif event.type == pygame.QUIT:
            print("Quitting...")
            running = False
        else:
            print("err0r, bAd 3v3nt lol")
            assert False

if __name__ == "__main__":
    main()
27.882653
85
0.665691
d4d2b1d5851dc6a58371dc3c355389cf9d7d425c
179
py
Python
test3_05.py
yoojunwoong/python_review01
9bb34f4ef75f951cd090fa623728c9542e7c7c27
[ "Apache-2.0" ]
null
null
null
test3_05.py
yoojunwoong/python_review01
9bb34f4ef75f951cd090fa623728c9542e7c7c27
[ "Apache-2.0" ]
null
null
null
test3_05.py
yoojunwoong/python_review01
9bb34f4ef75f951cd090fa623728c9542e7c7c27
[ "Apache-2.0" ]
null
null
null
# for continue , continue = skip!!!
for i in range(1,11):
    if i == 6:
        continue;
    print(i);
    print(i);
    print(i);
    print(i);
    print(i);
17.9
45
0.49162
d4d42429c658c9fa5c1d797f95b772cf6d3bbc13
12,044
py
Python
csmpe/core_plugins/csm_install_operations/exr/package_lib.py
anushreejangid/csmpe-main
c62ecb3ce4e44b188ed480d06a6d9d21967c6a2a
[ "BSD-2-Clause" ]
null
null
null
csmpe/core_plugins/csm_install_operations/exr/package_lib.py
anushreejangid/csmpe-main
c62ecb3ce4e44b188ed480d06a6d9d21967c6a2a
[ "BSD-2-Clause" ]
8
2017-04-21T05:36:37.000Z
2017-04-27T15:55:33.000Z
csmpe/core_plugins/csm_install_operations/exr/package_lib.py
anushreejangid/csmpe-main
c62ecb3ce4e44b188ed480d06a6d9d21967c6a2a
[ "BSD-2-Clause" ]
null
null
null
# =============================================================================
#
# Copyright (c) 2016, Cisco Systems
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
"""
NCS4K
Production Packages

External Names                                  Internal Names
ncs4k-full-x.iso-6.0.2
ncs4k-mini-x.iso-6.0.2
ncs4k-k9sec.pkg-6.0.2
ncs4k-mpls.pkg-6.0.2
ncs4k-mcast.pkg-6.0.2
ncs4k-mgbl.pkg-6.0.2

NCS6K
Production Packages

External Names                                  Internal Names
ncs6k-doc.pkg-5.2.4                             ncs6k-doc-5.2.4
ncs6k-li.pkg-5.2.4                              ncs6k-li-5.2.4
ncs6k-mcast.pkg-5.2.4                           ncs6k-mcast-5.2.4
ncs6k-mgbl.pkg-5.2.4                            ncs6k-mgbl-5.2.4
ncs6k-mini-x.iso-5.2.4                          ncs6k-mini-x-5.2.4
ncs6k-mpls.pkg-5.2.4                            ncs6k-mpls-5.2.4
ncs6k-sysadmin.iso-5.2.4                        ncs6k-sysadmin-5.2.4
ncs6k-full-x.iso-5.2.4                          ncs6k-full-x-5.2.4
ncs6k-5.2.5.CSCuy47880.smu                      ncs6k-5.2.5.CSCuy47880-1.0.0 <- subversion added

Engineering Packages

External Names                                  Internal Names
ncs6k-mcast.pkg-5.2.5.47I.DT_IMAGE              ncs6k-mcast-5.2.5.47I
ncs6k-mini-x.iso-6.1.0.07I.DT_IMAGE             ncs6k-xr-5.2.5.47I
ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i.smu          ncs6k-5.2.5.47I.CSCuy47880-0.0.4.i

ASR9K-64
Production Packages - not finalized yet

External Names                                  Internal Names
asr9k-mcast-x64-2.0.0.0-r611.x86_64.rpm         asr9k-mcast-x64-2.0.0.0-r611
asr9k-bgp-x64-1.0.0.0-r611.x86_64.rpm           asr9k-bgp-x64-1.0.0.0-r611
asr9k-mgbl-x64-3.0.0.0-r611.x86_64.rpm          asr9k-mgbl-x64-3.0.0.0-r611
asr9k-full-x64.iso-6.1.1                        asr9k-xr-6.1.1
asr9k-mini-x64.iso-6.1.1                        asr9k-xr-6.1.1

Engineering Packages

External Names                                                 Internal Names
asr9k-mcast-x64-2.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE  asr9k-mcast-x64-2.0.0.0-r61116I
asr9k-bgp-x64-1.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE    asr9k-bgp-x64-1.0.0.0-r61116I
asr9k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.1.1.16I.DT_IMAGE   asr9k-mgbl-x64-3.0.0.0-r61116I
asr9k-full-x64.iso-6.1.1.16I.DT_IMAGE                          asr9k-full-x64-6.1.1.16I
asr9k-mini-x64.iso-6.1.1.16I.DT_IMAGE                          asr9k-mini-x64-6.1.1.16I

NCS5K
Production Packages

External Names                                  Internal Names
ncs5k-sysadmin.iso-6.0.1                        ncs5k-sysadmin-6.0.1
ncs5k-full-x.iso-6.0.1                          ncs5k-xr-6.0.1
ncs5k-mini-x.iso-6.0.1                          ncs5k-xr-6.0.1
ncs5k-mcast-2.0.0.0-r601.x86_64.rpm-6.0.1       ncs5k-mcast-2.0.0.0-r601
ncs5k-mgbl-2.0.0.0-r601.x86_64.rpm-6.0.1        ncs5k-mgbl-2.0.0.0-r601
ncs5k-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1        ncs5k-mpls-2.0.0.0-r601
ncs5k-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1       ncs5k-k9sec-2.0.0.0-r601
ncs5k-isis-2.0.0.0-r601.x86_64.rpm-6.0.1        ncs5k-isis-2.0.0.0-r601
ncs5k-ospf-2.0.0.0-r601.x86_64.rpm-6.0.1        ncs5k-ospf-2.0.0.0-r601

Engineering Packages

External Names                                                 Internal Names
ncs5k-mgbl-x64-3.0.0.0-r61116I.x86_64.rpm-6.0.1.16I.DT_IMAGE   ncs5k-mgbl-3.0.0.0-r60116I
ncs5k-sysadmin.iso-6.0.1                                       ncs5k-sysadmin-6.0.1.26I
ncs5k-full-x.iso-6.0.1.16I.DT_IMAGE                            ncs5k-xr-6.0.1.16I

NCS5500
Production Packages

External Names                                       Internal Names
ncs5500-eigrp-2.0.0.0-r601.x86_64.rpm-6.0.1          ncs5500-eigrp-2.0.0.0-r601
ncs5500-isis-2.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-isis-2.0.0.0-r601
ncs5500-k9sec-2.0.0.0-r601.x86_64.rpm-6.0.1          ncs5500-k9sec-2.0.0.0-r601
ncs5500-m2m-2.0.0.0-r601.x86_64.rpm-6.0.1            ncs5500-m2m-2.0.0.0-r601
ncs5500-mgbl-3.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-mgbl-3.0.0.0-r601
ncs5500-mini-x.iso-6.0.1                             ncs5500-xr-6.0.1
ncs5500-mpls-te-rsvp-2.0.0.0-r601.x86_64.rpm-6.0.1   ncs5500-mpls-te-rsvp-2.0.0.0-r601
ncs5500-mpls-2.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-mpls-2.0.0.0-r601
ncs5500-ospf-1.0.0.0-r601.x86_64.rpm-6.0.1           ncs5500-ospf-1.0.0.0-r601
ncs5500-parser-1.0.0.0-r601.x86_64.rpm-6.0.1         ncs5500-parser-1.0.0.0-r601
"""
import re

platforms = ['asr9k', 'ncs1k', 'ncs4k', 'ncs5k', 'ncs5500', 'ncs6k', 'xrv9k']

version_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k":
                # 61117I or 611 or 6.1.1.17I or 6.1.1
                re.compile("(?P<VERSION>(\d+\d+\d+(\d+\w+)?)|(\d+\.\d+\.\d+(\.\d+\w+)?)(?!\.\d)(?!-))"),
                "ncs4k ncs6k":
                # 5.2.4 or 5.2.4.47I
                re.compile("(?P<VERSION>\d+\.\d+\.\d+(\.\d+\w+)?)"),
                }

smu_re = re.compile("(?P<SMU>CSC[a-z]{2}\d{5})")

subversion_dict = {"asr9k ncs1k ncs5k ncs5500 xrv9k":
                   re.compile("-(?P<SUBVERSION>\d+\.\d+\.\d+\.\d+)-"),  # 2.0.0.0
                   "ncs4k ncs6k":
                   re.compile("CSC.*(?P<SUBVERSION>\d+\.\d+\.\d+?)"),  # 0.0.4
                   }


# NOTE: the two methods below belong to a class whose definition is elided
# from this snippet (it models a software package and carries a
# `package_name` attribute).
def __repr__(self):
    return self.package_name


def __str__(self):
    return self.__repr__()
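# --- Illustrative usage (added sketch, not part of the original module) ---
# A hedged example of how the tables above are meant to be consumed: pick the
# regex whose key mentions the platform, then extract the named groups. The
# package name comes from the docstring above; everything else is assumed.
if __name__ == '__main__':
    name = 'ncs6k-5.2.5.CSCuy47880.smu'
    for platform_list, regex in version_dict.items():
        if 'ncs6k' in platform_list.split():
            match = regex.search(name)
            print(match.group('VERSION'))   # -> 5.2.5
    smu_match = smu_re.search(name)
    print(smu_match.group('SMU'))           # -> CSCuy47880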
42.86121
117
0.574643
d4d48c8aa150de0f108ac0a0655e92b6976fd528
41,579
py
Python
megaboat.py
xros/megaboat
e55e7959c39677ad2a0cdbb00ac88814b838d3e3
[ "MIT" ]
4
2015-06-07T18:44:02.000Z
2021-04-03T02:53:01.000Z
megaboat.py
xros/megaboat
e55e7959c39677ad2a0cdbb00ac88814b838d3e3
[ "MIT" ]
null
null
null
megaboat.py
xros/megaboat
e55e7959c39677ad2a0cdbb00ac88814b838d3e3
[ "MIT" ]
2
2015-03-27T04:24:55.000Z
2016-06-26T11:02:47.000Z
# -*- coding: utf-8 -*-
# Copyright to Alexander Liu.
# Any distribution of this copy should inform its author. If for commercial use, please inform the author for authentication. Apr 2014

import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from lxml import etree
import time
import json
import urllib
import urllib2
# For media posting
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers


# Below are the templates of all the response messages valid for wechat
# For more information, please visit : http://mp.weixin.qq.com/wiki/index.php?title=%E5%8F%91%E9%80%81%E8%A2%AB%E5%8A%A8%E5%93%8D%E5%BA%94%E6%B6%88%E6%81%AF

global tpl_text
global tpl_image
global tpl_voice
global tpl_video
global tpl_music
global tpl_news

tpl_text = u'''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[text]]></MsgType>
<Content><![CDATA[]]></Content>
</xml>'''

tpl_image = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[image]]></MsgType>
<Image>
<MediaId><![CDATA[media_id]]></MediaId>
</Image>
</xml>'''

tpl_voice = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[voice]]></MsgType>
<Voice>
<MediaId><![CDATA[media_id]]></MediaId>
</Voice>
</xml>'''

tpl_video = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[video]]></MsgType>
<Video>
<MediaId><![CDATA[media_id]]></MediaId>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
</Video>
</xml>'''

tpl_music = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[music]]></MsgType>
<Music>
<Title><![CDATA[TITLE]]></Title>
<Description><![CDATA[DESCRIPTION]]></Description>
<MusicUrl><![CDATA[MUSIC_Url]]></MusicUrl>
<HQMusicUrl><![CDATA[HQ_MUSIC_Url]]></HQMusicUrl>
<ThumbMediaId><![CDATA[media_id]]></ThumbMediaId>
</Music>
</xml>'''

tpl_news = '''<xml>
<ToUserName><![CDATA[toUser]]></ToUserName>
<FromUserName><![CDATA[fromUser]]></FromUserName>
<CreateTime>12345678</CreateTime>
<MsgType><![CDATA[news]]></MsgType>
<ArticleCount>2</ArticleCount>
<Articles>
<item>
<Title><![CDATA[title1]]></Title>
<Description><![CDATA[description1]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
<item>
<Title><![CDATA[title]]></Title>
<Description><![CDATA[description]]></Description>
<PicUrl><![CDATA[picurl]]></PicUrl>
<Url><![CDATA[url]]></Url>
</item>
</Articles>
</xml>'''


# Positive response
json_text = '''{
    "touser":"OPENID",
    "msgtype":"text",
    "text":
    {
        "content":"Hello World"
    }
}'''

json_image = '''{
    "touser":"OPENID",
    "msgtype":"image",
    "image":
    {
        "media_id":"MEDIA_ID"
    }
}'''

json_voice = '''{
    "touser":"OPENID",
    "msgtype":"voice",
    "voice":
    {
        "media_id":"MEDIA_ID"
    }
}'''

json_video = '''{
    "touser":"OPENID",
    "msgtype":"video",
    "video":
    {
        "media_id":"MEDIA_ID",
        "title":"TITLE",
        "description":"DESCRIPTION"
    }
}'''

json_music = '''{
    "touser":"OPENID",
    "msgtype":"music",
    "music":
    {
        "title":"MUSIC_TITLE",
        "description":"MUSIC_DESCRIPTION",
        "musicurl":"MUSIC_URL",
        "hqmusicurl":"HQ_MUSIC_URL",
        "thumb_media_id":"THUMB_MEDIA_ID"
    }
}'''

json_news = '''{
    "touser":"OPENID",
    "msgtype":"news",
    "news":{
        "articles": [
         {
             "title":"Happy Day",
             "description":"Is Really A Happy Day",
             "url":"URL",
             "picurl":"PIC_URL"
         },
         {
             "title":"Happy Day",
             "description":"Is Really A Happy Day",
             "url":"URL",
             "picurl":"PIC_URL"
         }
         ]
    }
}'''


def getAPIToken(appid='', appsecret=''):
    '''Get wechat API token for customer service or others.
    If ```appid``` and ```appsecret``` are correct then a string 'token' will be returned. If not, None will be returned.
    '''
    default_url = 'https://api.weixin.qq.com/cgi-bin/token?grant_type=client_credential&'
    url = default_url + 'appid=' + appid + '&secret=' + appsecret
    try:
        a = urllib2.urlopen(url)
    except Exception as e:
        print e
        return None
    else:
        gotten = a.read()
        a_dict = json.loads(gotten)
        if a_dict.has_key('access_token'):
            return a_dict['access_token']
        # means wrong appid or secret
        else:
            return None


def postMessage2API(token='', messageString=''):
    '''Using the token, post the message to the designated user. This returns a Boolean value'''
    url = "https://api.weixin.qq.com/cgi-bin/message/custom/send?access_token=" + token
    request = urllib2.Request(url, messageString)
    request.get_method = lambda : 'POST'
    try:
        response = urllib2.urlopen(request)
    except Exception as e:
        print e
        return False
    else:
        j = json.loads(response.read())
        # The above works
        #print j
        # to check if the message was accepted
        if j['errcode'] == 0:
            return True
        else:
            return False
37.391187
577
0.527237
d4d56609e653c9ccb3c77b86d7440eff8168b7af
89
py
Python
root/converter/__init__.py
thasmarinho/root-image-editor
0c3e955a1f81be02fef9a488b2b45a44cf16930a
[ "MIT" ]
2
2020-08-01T02:51:48.000Z
2021-11-22T11:58:40.000Z
root/converter/__init__.py
thasmarinho/root-image-editor
0c3e955a1f81be02fef9a488b2b45a44cf16930a
[ "MIT" ]
4
2019-10-30T14:14:46.000Z
2022-03-11T23:57:52.000Z
root/converter/__init__.py
thasmarinho/root-image-editor
0c3e955a1f81be02fef9a488b2b45a44cf16930a
[ "MIT" ]
1
2021-02-21T12:18:05.000Z
2021-02-21T12:18:05.000Z
from .color_converter import ColorConverter
from .scale_converter import ScaleConverter
22.25
43
0.876404
d4d7101b172b777d4c47f40c60724b8fe87dbf67
4,374
py
Python
chirun/plastex/color/__init__.py
sthagen/chirun-ncl-chirun
45897319d5203b9867b5d6e00b2db1aa90a6580c
[ "Apache-2.0" ]
5
2021-12-06T15:57:24.000Z
2022-01-24T20:34:00.000Z
chirun/plastex/color/__init__.py
sthagen/chirun-ncl-chirun
45897319d5203b9867b5d6e00b2db1aa90a6580c
[ "Apache-2.0" ]
38
2021-12-09T13:16:46.000Z
2022-03-30T11:42:13.000Z
chirun/plastex/color/__init__.py
sthagen/chirun-ncl-chirun
45897319d5203b9867b5d6e00b2db1aa90a6580c
[ "Apache-2.0" ]
1
2022-01-17T17:41:35.000Z
2022-01-17T17:41:35.000Z
from plasTeX import Command, Environment
36.45
118
0.560814
d4d711198a223af0615e717b95a37866d231b085
1,242
py
Python
ex035A11.py
gabrieleliasdev/python-cev
45390963b5112a982e673f6a6866da422bf9ae6d
[ "MIT" ]
null
null
null
ex035A11.py
gabrieleliasdev/python-cev
45390963b5112a982e673f6a6866da422bf9ae6d
[ "MIT" ]
null
null
null
ex035A11.py
gabrieleliasdev/python-cev
45390963b5112a982e673f6a6866da422bf9ae6d
[ "MIT" ]
null
null
null
# ANSI escape sequences: \033[<style>;<foreground>;<background>m colors the
# text that follows, and \033[m resets it (styles 0-7, foreground 30-38,
# background 40-48).
print('\033[0;33;44mTeste\033[m')
print('\033[4;33;44mTeste\033[m')
print('\033[1;35;43mTeste\033[m')
print('\033[7;32;40mTeste\033[m')
print('\033[7;30mTeste\033[m')

print(" - - - Testando os 40 - - -")
print("\033[0;37;40mPreto\033[m")
print("\033[0;30;41mVermelho\033[m")
print("\033[0;30;42mVerde\033[m")
print("\033[0;30;43mAmarelo\033[m")
print("\033[0;30;44mRoxo\033[m")
print("\033[0;30;45mLilás\033[m")
print("\033[0;30;46mTurquesa\033[m")
print("\033[0;30;47mBranco\033[m")
print("\033[0;36;48mFundo Transparente\033[m")

print(" - - - Testando os 30 - - -")
print("\033[0;37;40mTeste\033[m")
print("\033[0;31;40mTeste\033[m")
print("\033[0;32;40mTeste\033[m")
print("\033[0;33;40mTeste\033[m")
print("\033[0;34;40mTeste\033[m")
print("\033[0;35;40mTeste\033[m")
print("\033[0;36;40mTeste\033[m")
print("\033[0;37;40mTeste\033[m")
print("\033[0;38;40mTeste\033[m")

print(" - - - Testando os 1 - - -")
print("\033[0;30;47mTeste\033[m")
print("\033[1;30;47mTexto em Negrito\033[m")
print("\033[2;30;47mTeste\033[m")
print("\033[3;30;47mFonte Itálica\033[m")
print("\033[4;30;47mSublinhado\033[m")
print("\033[5;30;47mTeste\033[m")
print("\033[6;30;47mTeste\033[m")
print("\033[7;30;47mTeste\033[m")
print("\033[7;38;47mTeste\033[m")
33.567568
46
0.665056
d4d8056be31284c17cf40684370c5ac0209b3ede
1,296
py
Python
tg/release.py
TurboGears/tg2
f40a82d016d70ce560002593b4bb8f83b57f87b3
[ "MIT" ]
812
2015-01-16T22:57:52.000Z
2022-03-27T04:49:40.000Z
tg/release.py
KonstantinKlepikov/tg2
b230e98bf6f64b3620dcb4214fa45dafddb0d60f
[ "MIT" ]
74
2015-02-18T17:55:31.000Z
2021-12-13T10:41:08.000Z
tg/release.py
KonstantinKlepikov/tg2
b230e98bf6f64b3620dcb4214fa45dafddb0d60f
[ "MIT" ]
72
2015-06-10T06:02:45.000Z
2022-03-27T08:37:24.000Z
"""TurboGears project related information""" version = "2.4.3" description = "Next generation TurboGears" long_description=""" TurboGears brings together a best of breed python tools to create a flexible, full featured, and easy to use web framework. TurboGears 2 provides an integrated and well tested set of tools for everything you need to build dynamic, database driven applications. It provides a full range of tools for front end javascript develeopment, back database development and everything in between: * dynamic javascript powered widgets (ToscaWidgets2) * automatic JSON generation from your controllers * powerful, designer friendly XHTML based templating * object or route based URL dispatching * powerful Object Relational Mappers (SQLAlchemy) The latest development version is available in the `TurboGears Git repositories`_. .. _TurboGears Git repositories: https://github.com/TurboGears """ url="http://www.turbogears.org/" author= "Alessandro Molina, Mark Ramm, Christopher Perkins, Jonathan LaCour, Rick Copland, Alberto Valverde, Michael Pedersen and the TurboGears community" email = "[email protected]" copyright = """Copyright 2005-2020 Kevin Dangoor, Alberto Valverde, Mark Ramm, Christopher Perkins, Alessandro Molina and contributors""" license = "MIT"
41.806452
155
0.794753
d4d8cf9487b5b92aa26fd31970eb23caa185f9d2
816
py
Python
swm-master/swm-master/calc/mean_e_calc.py
m2lines/subgrid
3de5d14c5525a62529d43cbafccda716c74e32df
[ "MIT" ]
1
2021-11-03T01:27:16.000Z
2021-11-03T01:27:16.000Z
swm-master/swm-master/calc/mean_e_calc.py
m2lines/subgrid
3de5d14c5525a62529d43cbafccda716c74e32df
[ "MIT" ]
null
null
null
swm-master/swm-master/calc/mean_e_calc.py
m2lines/subgrid
3de5d14c5525a62529d43cbafccda716c74e32df
[ "MIT" ]
1
2021-06-24T15:58:32.000Z
2021-06-24T15:58:32.000Z
## PRODUCE MEAN CALCULATIONS AND EXPORT AS .NPY
from __future__ import print_function

path = '/home/mkloewer/python/swm/'
import os; os.chdir(path) # change working directory
import numpy as np
from scipy import sparse
import time as tictoc
from netCDF4 import Dataset

# OPTIONS
runfolder = 15
print('Calculating subgrid-EKE means from run ' + str(runfolder))

## read data
runpath = path+'data/run%04i' % runfolder
skip = 5*365
e = np.load(runpath+'/e_sub.npy')[skip:,:,:]
print('run %i read.' % runfolder)

## create output folder
try:
    os.mkdir(runpath+'/analysis')
except OSError:
    pass  # folder already exists

## e mean
em = e.mean(axis=0)
print('e mean done.')

## STORING
dic = dict()
all_var2export = ['em']

for v in all_var2export:
    exec('dic[v] ='+v)

np.save(runpath+'/analysis/mean_e.npy',dic)
print('Everything stored.')
20.4
65
0.704657
d4d8f82be29e6cb13695308004bac74a741d2095
8,111
py
Python
bogglesolver.py
gammazero/pybogglesolver
71d2c6d6ae8c9b5f580f6b27479aea3450a2895a
[ "MIT" ]
null
null
null
bogglesolver.py
gammazero/pybogglesolver
71d2c6d6ae8c9b5f580f6b27479aea3450a2895a
[ "MIT" ]
null
null
null
bogglesolver.py
gammazero/pybogglesolver
71d2c6d6ae8c9b5f580f6b27479aea3450a2895a
[ "MIT" ]
null
null
null
""" Module to generate solutions for Boggle grids. Andrew Gillis 22 Dec. 2009 """ from __future__ import print_function import os import sys import collections import trie if sys.version < '3': range = xrange
31.076628
79
0.501911
d4db73effedd714b6a4d9b15c4a8d627cf47c849
1,151
py
Python
tests/manage/monitoring/pagerduty/test_ceph.py
MeridianExplorer/ocs-ci
a33d5116128b88f176f5eff68a3ef805125cdba1
[ "MIT" ]
null
null
null
tests/manage/monitoring/pagerduty/test_ceph.py
MeridianExplorer/ocs-ci
a33d5116128b88f176f5eff68a3ef805125cdba1
[ "MIT" ]
null
null
null
tests/manage/monitoring/pagerduty/test_ceph.py
MeridianExplorer/ocs-ci
a33d5116128b88f176f5eff68a3ef805125cdba1
[ "MIT" ]
null
null
null
import logging

import pytest

from ocs_ci.framework.testlib import (
    managed_service_required,
    skipif_ms_consumer,
    tier4,
    tier4a,
)
from ocs_ci.ocs import constants
from ocs_ci.utility import pagerduty

log = logging.getLogger(__name__)
26.159091
80
0.741095
d4db81ffa51e39a4b08cb2f618fbc4f85e8db0b8
3,442
py
Python
STANchap7.py
phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan
d708faab0fdd43800e8726e2c6dd99452c8dcedb
[ "Unlicense" ]
1
2021-03-18T08:01:32.000Z
2021-03-18T08:01:32.000Z
STANchap7.py
phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan
d708faab0fdd43800e8726e2c6dd99452c8dcedb
[ "Unlicense" ]
null
null
null
STANchap7.py
phineas-pta/Bayesian-Methods-for-Hackers-using-PyStan
d708faab0fdd43800e8726e2c6dd99452c8dcedb
[ "Unlicense" ]
null
null
null
# -*- coding: utf-8 -*-

import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns
from cmdstanpy import CmdStanModel

#%% load data

data = pd.read_csv("data/overfitting.csv", index_col = 'case_id')
data.columns
data.info()

feature_names = data.columns.str.startswith("var_")
predictors = data[data.columns[feature_names]]
labels = data["Target_Practice"]

ix_training = data.train == 1
training_data = predictors[ix_training]
training_labels = labels[ix_training]

ix_testing = data.train == 0
testing_data = predictors[ix_testing]
testing_labels = labels[ix_testing]

sns.displot(training_data.values.flatten(), bins = "sqrt", kde = True)

pca = prince.PCA(n_components = 2, as_array = False).fit(training_data)
pca.plot_row_coordinates(training_data, color_labels = training_labels)
pca.column_correlations(training_data).plot.scatter(x = 0, y = 1) # weird column name

#%% Roshan Sharma model

mdl_data = { # problem with JSON dump => cast to python native type
    'N': ix_training.sum().tolist(),
    'N2': ix_testing.sum().tolist(),
    'K': feature_names.sum().tolist(),
    'y': training_labels.values.tolist(),
    'X': training_data.values.tolist(),
    'new_X': testing_data.values.tolist(),
}
modelfile = "OverfittingRoshanSharma.stan"
with open(modelfile, "w") as file: file.write("""
data {
    int N; // the number of training observations
    int N2; // the number of test observations
    int K; // the number of features
    int y[N]; // the response
    matrix[N,K] X; // the model matrix
    matrix[N2,K] new_X; // the matrix for the predicted values
}
parameters { // regression parameters
    real alpha;
    vector[K] beta;
}
transformed parameters {
    vector[N] linpred = alpha + X * beta;
}
model {
    alpha ~ cauchy(0, 10); // prior for the intercept following Gelman 2008
    beta ~ student_t(1, 0, 0.03);
    y ~ bernoulli_logit(linpred);
}
generated quantities { // y values predicted by the model
    vector[N2] y_pred = alpha + new_X * beta;
}
""")

var_name_array = ["alpha"] + [f"beta[{i+1}]" for i in range(mdl_data["K"])]
var_name_combi = ["alpha", "beta"]

sm = CmdStanModel(stan_file = modelfile)

# maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]
plt.plot(optim[var_name_array[1:]].values[0])

# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb.variational_params_pd[var_name_array]
vb.variational_sample[var_name_array]

# Markov chain Monte Carlo
fit = sm.sample(
    data = mdl_data, show_progress = True,
    chains = 4, iter_sampling = 50000, iter_warmup = 10000, thin = 5
)

fit.draws().shape # iterations, chains, parameters
fit.summary().loc[var_name_array] # pandas DataFrame
print(fit.diagnose())

posterior = {k: fit.stan_variable(k) for k in var_name_combi}

az_trace = az.from_cmdstanpy(fit)
az.summary(az_trace).loc[var_name_array] # pandas DataFrame
az.plot_trace(az_trace, var_names = ["alpha"])
az.plot_forest(az_trace, var_names = ["beta"])

sample_pred = fit.stan_variable('y_pred')

# Tim Salimans model: DOES NOT WORK yet
# need to figure out how to marginalize all discrete params
31.577982
109
0.70889
d4dcaac9477532add98d53c114feaaa486ee4a47
4,206
py
Python
watcher.py
factabulous/matgrindr
6f5d6d20e34f9b13950d654cf70afdb2e46f5d1e
[ "Apache-2.0" ]
1
2018-03-31T12:15:07.000Z
2018-03-31T12:15:07.000Z
watcher.py
factabulous/matgrindr
6f5d6d20e34f9b13950d654cf70afdb2e46f5d1e
[ "Apache-2.0" ]
null
null
null
watcher.py
factabulous/matgrindr
6f5d6d20e34f9b13950d654cf70afdb2e46f5d1e
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*-

import json
import threading
import os
import time
import mats
import sys
import requests
import traceback
import re

from util import debug, error
33.11811
195
0.551831
d4dccf62068146e1f5c5000f7700eb596a2140ec
1,706
py
Python
luoxia/pipelines.py
pighui/luoxia
24daa0f1595fd2b18a4b251acf77321ef98eb534
[ "MIT" ]
2
2019-11-07T09:27:59.000Z
2019-11-16T11:36:12.000Z
luoxia/pipelines.py
pighui/luoxia
24daa0f1595fd2b18a4b251acf77321ef98eb534
[ "MIT" ]
5
2021-03-31T19:15:38.000Z
2022-03-02T14:57:57.000Z
luoxia/pipelines.py
pighui/luoxia
24daa0f1595fd2b18a4b251acf77321ef98eb534
[ "MIT" ]
1
2019-11-12T12:59:22.000Z
2019-11-12T12:59:22.000Z
# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os

from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline

from luoxia import settings
32.807692
76
0.601407
d4df00044c8b020894b3ff8a98bbdaaae75f9a17
6,949
py
Python
aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py
jpmarques19/tensorflwo-test
0ff8b06e0415075c7269820d080284a42595bb2e
[ "Apache-2.0" ]
5
2019-01-19T23:53:35.000Z
2022-01-29T14:04:31.000Z
aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py
jpmarques19/tensorflwo-test
0ff8b06e0415075c7269820d080284a42595bb2e
[ "Apache-2.0" ]
4
2020-09-26T01:25:36.000Z
2021-08-25T16:10:50.000Z
aws_sagemaker_studio/frameworks/tensorflow_mnist/mnist.py
jpmarques19/tensorflwo-test
0ff8b06e0415075c7269820d080284a42595bb2e
[ "Apache-2.0" ]
7
2020-03-04T22:23:51.000Z
2021-07-13T14:05:46.000Z
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
#     http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""

from __future__ import absolute_import, division, print_function

import argparse
import json
import os

import numpy as np
import tensorflow as tf


def cnn_model_fn(features, labels, mode):
    """Model function for CNN."""
    # Input Layer
    # Reshape X to 4-D tensor: [batch_size, width, height, channels]
    # MNIST images are 28x28 pixels, and have one color channel
    input_layer = tf.reshape(features['x'], [-1, 28, 28, 1])

    # Convolutional Layer #1
    # Computes 32 features using a 5x5 filter with ReLU activation.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 28, 28, 1]
    # Output Tensor Shape: [batch_size, 28, 28, 32]
    conv1 = tf.layers.conv2d(
        inputs=input_layer,
        filters=32,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu
    )

    # Pooling Layer #1
    # First max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 28, 28, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 32]
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # Convolutional Layer #2
    # Computes 64 features using a 5x5 filter.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 14, 14, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 64]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu
    )

    # Pooling Layer #2
    # Second max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 14, 14, 64]
    # Output Tensor Shape: [batch_size, 7, 7, 64]
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    # Flatten tensor into a batch of vectors
    # Input Tensor Shape: [batch_size, 7, 7, 64]
    # Output Tensor Shape: [batch_size, 7 * 7 * 64]
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])

    # Dense Layer
    # Densely connected layer with 1024 neurons
    # Input Tensor Shape: [batch_size, 7 * 7 * 64]
    # Output Tensor Shape: [batch_size, 1024]
    dense = tf.layers.dense(inputs=pool2_flat, units=1024, activation=tf.nn.relu)

    # Add dropout operation; 0.6 probability that element will be kept
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4, training=mode == tf.estimator.ModeKeys.TRAIN)

    # Logits layer
    # Input Tensor Shape: [batch_size, 1024]
    # Output Tensor Shape: [batch_size, 10]
    logits = tf.layers.dense(inputs=dropout, units=10)

    predictions = {
        # Generate predictions (for PREDICT and EVAL mode)
        'classes': tf.argmax(input=logits, axis=1),
        # Add `softmax_tensor` to the graph. It is used for PREDICT and by the
        # `logging_hook`.
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Calculate Loss (for both TRAIN and EVAL modes)
    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

    # Configure the Training Op (for TRAIN mode)
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)
        train_op = optimizer.minimize(
            loss=loss,
            global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode=mode, loss=loss, train_op=train_op)

    # Add evaluation metrics (for EVAL mode)
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(
            labels=labels, predictions=predictions['classes'])}
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)


if __name__ == '__main__':
    args, _ = _parse_args()

    train_data, train_labels = _load_training_data(args.train)
    eval_data, eval_labels = _load_testing_data(args.train)

    # Create the Estimator
    mnist_classifier = tf.estimator.Estimator(model_fn=cnn_model_fn, model_dir=args.model_dir)

    # Set up logging for predictions
    # Log the values in the 'Softmax' tensor with label 'probabilities'
    tensors_to_log = {'probabilities': 'softmax_tensor'}
    logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=50)

    # Train the model
    train_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': train_data},
        y=train_labels,
        batch_size=100,
        num_epochs=None,
        shuffle=True
    )

    # Evaluate the model and print results
    eval_input_fn = tf.estimator.inputs.numpy_input_fn(
        x={'x': eval_data},
        y=eval_labels,
        num_epochs=1,
        shuffle=False
    )

    train_spec = tf.estimator.TrainSpec(train_input_fn, max_steps=20000)
    eval_spec = tf.estimator.EvalSpec(eval_input_fn)
    tf.estimator.train_and_evaluate(mnist_classifier, train_spec, eval_spec)

    if args.current_host == args.hosts[0]:
        mnist_classifier.export_savedmodel(args.sm_model_dir, serving_input_fn)
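# --- Added sketch (not part of the stored file) ---
# The script above calls _parse_args, _load_training_data and
# _load_testing_data without defining them in this snippet; in the SageMaker
# sample they sit above the __main__ guard. A hedged reconstruction follows.
# The .npy file names and the SM_* environment variables are assumptions.
def _load_training_data(base_dir):
    # Load the training arrays dropped into the training channel.
    x_train = np.load(os.path.join(base_dir, 'train_data.npy'))
    y_train = np.load(os.path.join(base_dir, 'train_labels.npy'))
    return x_train, y_train


def _load_testing_data(base_dir):
    # Evaluation arrays live next to the training ones in this layout.
    x_test = np.load(os.path.join(base_dir, 'eval_data.npy'))
    y_test = np.load(os.path.join(base_dir, 'eval_labels.npy'))
    return x_test, y_test


def _parse_args():
    # SageMaker passes locations through environment variables; defaults
    # below are guesses at the usual container contract.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', type=str)
    parser.add_argument('--sm-model-dir', type=str,
                        default=os.environ.get('SM_MODEL_DIR'))
    parser.add_argument('--train', type=str,
                        default=os.environ.get('SM_CHANNEL_TRAINING'))
    parser.add_argument('--hosts', type=list,
                        default=json.loads(os.environ.get('SM_HOSTS', '[]')))
    parser.add_argument('--current-host', type=str,
                        default=os.environ.get('SM_CURRENT_HOST'))
    return parser.parse_known_args()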
37.160428
94
0.689164
d4e1891a34dd9a85739bf4476b3f8a83de7af2b1
6,002
py
Python
common/util/autoware_debug_tools/scripts/stop_reason2pose.py
loop-perception/AutowareArchitectureProposal.iv
5d8dff0db51634f0c42d2a3e87ca423fbee84348
[ "Apache-2.0" ]
12
2020-09-25T08:52:59.000Z
2020-10-05T02:39:31.000Z
common/util/autoware_debug_tools/scripts/stop_reason2pose.py
loop-perception/AutowareArchitectureProposal.iv
5d8dff0db51634f0c42d2a3e87ca423fbee84348
[ "Apache-2.0" ]
7
2021-12-13T04:28:48.000Z
2022-03-14T13:53:15.000Z
common/util/autoware_debug_tools/scripts/stop_reason2pose.py
taikitanaka3/AutowareArchitectureProposal.iv
0d47ea532118c98458516a8c83fbdab3d27c6231
[ "Apache-2.0" ]
9
2020-09-27T05:27:09.000Z
2020-10-08T03:14:25.000Z
#! /usr/bin/env python3

# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import math
import sys

from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener

if __name__ == "__main__":
    main(sys.argv[1:])
35.72619
94
0.653615
d4e302bb88e4c014fa9f911add690a08d53c06f0
2,578
py
Python
aiounittest/case.py
tmaila/aiounittest
c43d3b619fd6a8fd071758996a5f42310b0293dc
[ "MIT" ]
55
2017-08-18T10:24:05.000Z
2022-03-21T08:29:19.000Z
aiounittest/case.py
tmaila/aiounittest
c43d3b619fd6a8fd071758996a5f42310b0293dc
[ "MIT" ]
15
2017-09-22T13:14:43.000Z
2022-01-23T16:29:22.000Z
aiounittest/case.py
tmaila/aiounittest
c43d3b619fd6a8fd071758996a5f42310b0293dc
[ "MIT" ]
4
2019-11-26T18:08:43.000Z
2021-06-01T22:12:00.000Z
import asyncio
import unittest

from .helpers import async_test
29.976744
152
0.600465
d4e335bc88c686cd971644ea0114064bbbe14924
1,551
py
Python
US Flag.py
Code-Master1234/Turtle_Flags_File_Hub
d99f8bc05c4f2280f8c91cdda14005ef9c5d6236
[ "MIT" ]
null
null
null
US Flag.py
Code-Master1234/Turtle_Flags_File_Hub
d99f8bc05c4f2280f8c91cdda14005ef9c5d6236
[ "MIT" ]
null
null
null
US Flag.py
Code-Master1234/Turtle_Flags_File_Hub
d99f8bc05c4f2280f8c91cdda14005ef9c5d6236
[ "MIT" ]
null
null
null
import turtle as t

t.penup()
gotoy = 222
t.speed(0)
t.setup(988,520)
t.goto(494,260)
t.pendown()
for counter in range(7):
    t.setheading(-90)
    rectangle(40,988,'#B22234')
    t.setheading(-90)
    t.forward(80)
t.penup()
t.setheading(0)
t.goto(-494,260)
t.pendown()
rectangle(494,280,'#3C3B6E')
t.goto(-474,245)
for counter in range(4):
    for counter in range(6):
        star(9,5,'white')
        t.setheading(0)
        t.forward(84)
    t.penup()
    t.goto(-434,gotoy)
    gotoy = gotoy - 28
    t.pendown()
    for counter in range(5):
        star(9,5,'white')
        t.setheading(0)
        t.forward(84)
    t.goto(-476,gotoy)
    gotoy = gotoy - 28
for counter in range(6):
    star(9,5,'white')
    t.setheading(0)
    t.forward(84)
t.penup()
t.hideturtle()
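# --- Added sketch (not part of the stored file) ---
# rectangle() and star() are called above but not defined in this snippet;
# in the original they would be defined before use. Minimal stand-ins, with
# signatures guessed from the calls rectangle(40, 988, '#B22234') and
# star(9, 5, 'white'):
def rectangle(height, width, color):
    t.pendown()
    t.color(color)
    t.begin_fill()
    for _ in range(2):
        t.forward(height)
        t.left(90)
        t.forward(width)
        t.left(90)
    t.end_fill()
    t.penup()

def star(size, points, color):
    t.pendown()
    t.color(color)
    t.begin_fill()
    for _ in range(points):
        t.forward(size)
        t.right(180 - 180 / points)  # 144-degree turns trace a 5-point star
    t.end_fill()
    t.penup()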
19.884615
44
0.550613
d4e4309129dbca39258000122d1486ad109432d7
1,107
py
Python
linked-list/delete_zero_sum_nodes.py
bryanlimy/technical-interview
f888a4fb2bc4d34dda6cd74b6e4215f46d5ce6d6
[ "MIT" ]
3
2020-01-20T05:12:52.000Z
2022-02-09T15:21:42.000Z
linked-list/delete_zero_sum_nodes.py
bryanlimy/technical-interview
f888a4fb2bc4d34dda6cd74b6e4215f46d5ce6d6
[ "MIT" ]
null
null
null
linked-list/delete_zero_sum_nodes.py
bryanlimy/technical-interview
f888a4fb2bc4d34dda6cd74b6e4215f46d5ce6d6
[ "MIT" ]
null
null
null
# Given a linked list, remove consecutive nodes that sum up to zero
# https://www.careercup.com/question?id=5717797377146880

from util import *

if __name__ == "__main__":
    s1 = [6, -6, 8, 4, -12, 9, 8, -8]
    s2 = [4, 6, -10, 8, 9, 10, -19, 10, -18, 20, 25]
    s3 = [2, 3, -5, 10, 10, -5, -5, 20, 5, -5]
    samples = [s1, s2, s3]

    for sample in samples:
        head = create_linked_list(sample)
        print(linked_list_to_list(head))
        result = remove_zero_sum(head)
        print(linked_list_to_list(result))
        print("\n")
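# --- Added sketch (not part of the stored file) ---
# remove_zero_sum lives in the local `util` module, which is not included
# here. The classic prefix-sum approach it presumably implements: any run of
# nodes between two occurrences of the same running sum adds up to zero and
# can be spliced out. SketchNode is a hypothetical stand-in for util's node.
class SketchNode:
    def __init__(self, value, next=None):
        self.value, self.next = value, next


def remove_zero_sum_sketch(head):
    dummy = SketchNode(0, head)
    prefix, last_seen = 0, {0: dummy}
    node = head
    while node:  # pass 1: remember the last node carrying each prefix sum
        prefix += node.value
        last_seen[prefix] = node
        node = node.next
    prefix, node = 0, dummy
    while node:  # pass 2: jump over any zero-sum stretch
        prefix += node.value
        node.next = last_seen[prefix].next
        node = node.next
    return dummy.next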
26.357143
68
0.525745
d4e708b09e82bdf3236441c1829a0dda6f660d73
2,383
py
Python
src/azure-cli/azure/cli/command_modules/maps/custom.py
psignoret/azure-cli
1a4a043750315f9a7f2894b4287126089978b615
[ "MIT" ]
1
2019-11-15T17:28:05.000Z
2019-11-15T17:28:05.000Z
src/azure-cli/azure/cli/command_modules/maps/custom.py
psignoret/azure-cli
1a4a043750315f9a7f2894b4287126089978b615
[ "MIT" ]
2
2021-01-15T09:24:07.000Z
2021-01-15T09:30:10.000Z
src/azure-cli/azure/cli/command_modules/maps/custom.py
psignoret/azure-cli
1a4a043750315f9a7f2894b4287126089978b615
[ "MIT" ]
1
2019-11-25T19:33:05.000Z
2019-11-25T19:33:05.000Z
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------

from knack.log import get_logger
from knack.prompting import prompt_y_n
from knack.util import CLIError

from azure.mgmt.maps.models import (
    MapsAccountCreateParameters,
    Sku)

ACCOUNT_LOCATION = 'global'

logger = get_logger(__name__)
40.389831
107
0.669744
d4e813b035bc0fbeece6fd5910d8e62ac5025f2b
5,558
py
Python
examples/wsdm2022/run_seqreco_B.py
Leavingseason/wsdm2022-seqrecsys
4659edb93a96300d7a52bb0e1b9c912e3fae2a76
[ "MIT" ]
null
null
null
examples/wsdm2022/run_seqreco_B.py
Leavingseason/wsdm2022-seqrecsys
4659edb93a96300d7a52bb0e1b9c912e3fae2a76
[ "MIT" ]
null
null
null
examples/wsdm2022/run_seqreco_B.py
Leavingseason/wsdm2022-seqrecsys
4659edb93a96300d7a52bb0e1b9c912e3fae2a76
[ "MIT" ]
null
null
null
import sys
import os
from tempfile import TemporaryDirectory

import numpy as np
import tensorflow.compat.v1 as tf
tf.get_logger().setLevel('ERROR') # only show error messages

from recommenders.utils.timer import Timer
from recommenders.utils.constants import SEED
from recommenders.models.deeprec.deeprec_utils import (
    prepare_hparams
)
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab
from recommenders.datasets.download_utils import maybe_download
from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel
# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel
# from recommenders.models.deeprec.models.sequential.caser import CaserModel as SeqModel
# from recommenders.models.deeprec.models.sequential.gru4rec import GRU4RecModel as SeqModel
# from recommenders.models.deeprec.models.sequential.sum import SUMModel as SeqModel
#from recommenders.models.deeprec.models.sequential.nextitnet import NextItNetModel
from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
#from recommenders.models.deeprec.io.nextitnet_iterator import NextItNetIterator

print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))

yaml_file = '/home/jialia/wsdm/src/recommenders/examples/wsdm2022/sli_rec_B.yaml'

RANDOM_SEED = SEED  # Set None for non-deterministic result

# data_path = os.path.join("tests", "resources", "deeprec", "slirec")
# data_path = '/home/jialia/wsdm/seq_datasets/B_full_feature_v2'
data_path = sys.argv[1]
print(os.path.abspath(data_path))  ## the path where I enter the cmd

# for test
train_file = os.path.join(data_path, r'train_instances.txt')
valid_file = os.path.join(data_path, r'valid_instances.txt')
test_file = os.path.join(data_path, r'valid.tsv')
pred_file = os.path.join(data_path, r'inter_test.tsv')
final_pred_file = os.path.join(data_path, r'final_test.tsv')
user_vocab = os.path.join(data_path, r'user_vocab.pkl')
item_vocab = os.path.join(data_path, r'item_vocab.pkl')
cate_vocab = os.path.join(data_path, r'category_vocab.pkl')
output_file = os.path.join(data_path, r'inter_test_output.txt')
submit_file = os.path.join(data_path, r'final_test_output.txt')

train_num_ngs = 9  # number of negative instances with a positive instance for training
valid_num_ngs = 9  # number of negative instances with a positive instance for validation
test_num_ngs = 9   # number of negative instances with a positive instance for testing

_create_vocab(
    [train_file, valid_file],
    user_vocab, item_vocab, cate_vocab
)
### NOTE:
### remember to use `_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)` to generate the user_vocab, item_vocab and cate_vocab files, if you are using your own dataset rather than using our demo Amazon dataset.

hparams = prepare_hparams(yaml_file,
                          # user_dropout=False,
                          embed_l2=0.,
                          layer_l2=0.,
                          enable_BN=True,  ##-- True
                          learning_rate=0.001,  # set to 0.01 if batch normalization is disable else 0.001
                          epochs=100000,
                          EARLY_STOP=40000,
                          batch_size=400,
                          show_step=5000,
                          MODEL_DIR=os.path.join(data_path, "model/"),
                          SUMMARIES_DIR=os.path.join(data_path, "summary/"),
                          user_vocab=user_vocab,
                          item_vocab=item_vocab,
                          cate_vocab=cate_vocab,
                          need_sample=False,
                          train_num_ngs=train_num_ngs,  # provides the number of negative instances for each positive instance for loss computation.
                          loss='log_loss',  #'log_loss', 'softmax'
                          max_seq_length=50,
                          cont_feat_len=85,
                          use_cont_feat=False,
                          init_item_emb=False,
                          shuffle=True
)
print(hparams.values)

input_creator = SequentialIterator

model = SeqModel(hparams, input_creator, seed=RANDOM_SEED)
# model.load_model(os.path.join(data_path, "model_20220118_20k_0.8923", 'step_20000'))

with Timer() as train_time:
    model = model.fit(train_file, valid_file, valid_num_ngs=9, eval_metric='auc')

print('Time cost for training is {0:.2f} mins'.format(train_time.interval/60.0))

### model = model.fit(test_file, test_file, valid_num_ngs=9, eval_metric='auc')  ##-- quick test

model.load_model(os.path.join(data_path, "model", 'best_model'))

res_syn = model.run_eval(test_file, num_ngs=9)
print(res_syn)

model.predict(pred_file, output_file)
model.predict(final_pred_file, submit_file)

# print('Job finished. B, continue training = 20k, seq=50')
# print('Job finished. B_v2, epoch=50k, seq=100')

## ASVD: 0.867497
## GRU: 0.877529
## SLi-Rec: 0.892736
## B_v4: 0.8937

print("Job:B_full_feature_v2, with BN, no cont feat, seq=50, shuffle=True")
## B_full_feature_v2 no cont_feat, with BN
##5k: 0.8778
##10k: 0.8827
##20k: 0.8848
##25k: 0.8824
##35k: 0.8878
##40k: 0.8903
##45k: 0.8876
##50k: 0.8925
##55k: 0.8903
##60k: 0.8894
##65k: 0.8904
##70k: 0.8814
##75k: 0.8896
##80k: 0.8871
##85k: 0.8920

## with shuffle:
##5k: 0.8793
##10k: 0.8884
##15k: 0.8898
##20k: 0.8923
##25k: 0.8908
##30k: 0.8895
##35k: 0.8888
##40k: 0.8913
##45k: 0.8909
##50k: 0.8876
##65k: 0.8881
37.302013
221
0.690896
d4e8209a5a512c6f4d48304a062ee3d210b0266c
11,222
py
Python
ctypesgen/ctypedescs.py
fgrie/ctypesgen
bc1627648a1479cefd1a2c3c261dd0471358cfff
[ "BSD-2-Clause" ]
null
null
null
ctypesgen/ctypedescs.py
fgrie/ctypesgen
bc1627648a1479cefd1a2c3c261dd0471358cfff
[ "BSD-2-Clause" ]
null
null
null
ctypesgen/ctypedescs.py
fgrie/ctypesgen
bc1627648a1479cefd1a2c3c261dd0471358cfff
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python

"""
ctypesgen.ctypedescs contains classes to represent a C type. All of these
classes are subclasses of CtypesType.

Unlike in previous versions of ctypesgen, CtypesType and its subclasses are
completely independent of the parser module.

The most important method of CtypesType and its subclasses is the py_string
method. str(ctype) returns a string which, when evaluated in the wrapper
at runtime, results in a ctypes type object.

For example, a CtypesType
representing an array of four integers could be created using:

>>> ctype = CtypesArray(CtypesSimple("int",True,0),4)

str(ctype) would evaluate to "c_int * 4".
"""

import warnings

__docformat__ = "restructuredtext"

ctypes_type_map = {
    # typename   signed  longs
    ("void", True, 0): "None",
    ("int", True, 0): "c_int",
    ("int", False, 0): "c_uint",
    ("int", True, 1): "c_long",
    ("int", False, 1): "c_ulong",
    ("char", True, 0): "c_char",
    ("char", False, 0): "c_ubyte",
    ("short", True, 0): "c_short",
    ("short", False, 0): "c_ushort",
    ("float", True, 0): "c_float",
    ("double", True, 0): "c_double",
    ("double", True, 1): "c_longdouble",
    ("int8_t", True, 0): "c_int8",
    ("__int8", True, 0): "c_int8",
    ("int16_t", True, 0): "c_int16",
    ("__int16", True, 0): "c_int16",
    ("int32_t", True, 0): "c_int32",
    ("__int32", True, 0): "c_int32",
    ("int64_t", True, 0): "c_int64",
    ("__int64", True, 0): "c_int64",
    ("uint8_t", True, 0): "c_uint8",
    ("uint16_t", True, 0): "c_uint16",
    ("uint32_t", True, 0): "c_uint32",
    ("uint64_t", True, 0): "c_uint64",
    ("_Bool", True, 0): "c_bool",
}

ctypes_type_map_python_builtin = {
    ("int", True, 2): "c_longlong",
    ("int", False, 2): "c_ulonglong",
    ("size_t", True, 0): "c_size_t",
    ("apr_int64_t", True, 0): "c_int64",
    ("off64_t", True, 0): "c_int64",
    ("apr_uint64_t", True, 0): "c_uint64",
    ("wchar_t", True, 0): "c_wchar",
    ("ptrdiff_t", True, 0): "c_ptrdiff_t",  # Requires definition in preamble
    ("ssize_t", True, 0): "c_ptrdiff_t",  # Requires definition in preamble
    ("va_list", True, 0): "c_void_p",
}


# This protocol is used for walking type trees.

# Remove one level of indirection from function pointer; needed for typedefs
# and function parameters.

last_tagnum = 0
28.848329
90
0.621012
d4e990995bc970a5eeb5c450531463a5dff36df5
2,026
py
Python
pytouch/elements.py
Krai53n/pytouch
8a1c69c4ba5981f3cb0bf00db3bcef5dd15e8375
[ "MIT" ]
null
null
null
pytouch/elements.py
Krai53n/pytouch
8a1c69c4ba5981f3cb0bf00db3bcef5dd15e8375
[ "MIT" ]
null
null
null
pytouch/elements.py
Krai53n/pytouch
8a1c69c4ba5981f3cb0bf00db3bcef5dd15e8375
[ "MIT" ]
null
null
null
from random import randint

import pyxel

from constants import Screen
import cursors


class ReachCircle(Circle):
    def respawn(self):
        self._x = randint(self._r, Screen.width - self._r)
        self._y = randint(self._r, Screen.height - self._r)
        self._r = randint(self.min_r, min(Screen.width, Screen.height) // 2) - 4

    def draw(self):
        pyxel.circb(self._x, self._y, self._r, self._col)
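# --- Added sketch (not part of the stored file) ---
# The Circle base class is elided from this snippet. Judging from the
# attributes ReachCircle uses (_x, _y, _r, _col, min_r), a minimal stand-in
# could look like this (all defaults are guesses; in the original it would be
# defined before ReachCircle):
class CircleSketch:
    def __init__(self, x=0, y=0, r=10, col=7):
        self._x, self._y, self._r, self._col = x, y, r, col
        self.min_r = 10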
21.104167
80
0.579961
d4e9e1912fd06e0dea7f2e62b354d4050bf65bf1
1,769
py
Python
app/volume/admin_process.py
cleve/varidb
fc1b10aa4d708cee1c83909f10773948cee0c539
[ "Apache-2.0" ]
null
null
null
app/volume/admin_process.py
cleve/varidb
fc1b10aa4d708cee1c83909f10773948cee0c539
[ "Apache-2.0" ]
6
2020-11-05T02:18:15.000Z
2022-03-12T00:50:09.000Z
app/volume/admin_process.py
cleve/pulzar
fc1b10aa4d708cee1c83909f10773948cee0c539
[ "Apache-2.0" ]
null
null
null
from pulzarutils.utils import Utils
from pulzarutils.utils import Constants
from pulzarutils.messenger import Messenger
from pulzarcore.core_db import DB
38.456522
84
0.611645
d4ea75a1746392a1bad32c927e9dd06c16722c29
2,767
py
Python
tests/ssg_test_suite/profile.py
fduthilleul/scap-security-guide
f9b67869600f6c20dcb0ba83801578cec1a51bba
[ "BSD-3-Clause" ]
null
null
null
tests/ssg_test_suite/profile.py
fduthilleul/scap-security-guide
f9b67869600f6c20dcb0ba83801578cec1a51bba
[ "BSD-3-Clause" ]
null
null
null
tests/ssg_test_suite/profile.py
fduthilleul/scap-security-guide
f9b67869600f6c20dcb0ba83801578cec1a51bba
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python2
from __future__ import print_function

import atexit
import logging
import sys

import ssg_test_suite.oscap
import ssg_test_suite.virt
from ssg_test_suite.rule import get_viable_profiles
from ssg_test_suite.virt import SnapshotStack

logging.getLogger(__name__).addHandler(logging.NullHandler())


def perform_profile_check(options):
    """Perform profile check.

    Iterate over profiles in datastream and perform scanning of unaltered VM
    using every profile according to input. Also perform remediation run.

    Return value not defined, textual output and generated reports is the
    result.
    """
    dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
                                             options.domain_name)
    if dom is None:
        sys.exit(1)
    snapshot_stack = SnapshotStack(dom)
    atexit.register(snapshot_stack.clear)

    snapshot_stack.create('origin')
    ssg_test_suite.virt.start_domain(dom)
    domain_ip = ssg_test_suite.virt.determine_ip(dom)

    has_worked = False
    profiles = get_viable_profiles(options.target,
                                   options.datastream,
                                   options.benchmark_id)
    if len(profiles) > 1:
        snapshot_stack.create('profile')
    for profile in profiles:
        logging.info("Evaluation of profile {0}.".format(profile))
        has_worked = True
        runner = options.remediate_using
        ssg_test_suite.oscap.run_profile(domain_ip,
                                         profile,
                                         'initial',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        ssg_test_suite.oscap.run_profile(domain_ip,
                                         profile,
                                         'remediation',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        ssg_test_suite.oscap.run_profile(domain_ip,
                                         profile,
                                         'final',
                                         options.datastream,
                                         options.benchmark_id,
                                         runner=runner)
        snapshot_stack.revert(delete=False)
    if not has_worked:
        logging.error("Nothing has been tested!")
    snapshot_stack.delete()
    # depending on number of profiles we have either "origin" snapshot
    # still to be reverted (multiple profiles) or we are reverted
    # completely (only one profile was run)
38.971831
76
0.553668
d4eb283ef9b63b6cf71ae47aefac07d2d47fad48
4,218
py
Python
lib/wtforms/ext/appengine/fields.py
solidaritreebiz/Solidaritree
15cc2e10e4cec56eb4fe218166d4157fcce9bf8d
[ "MIT" ]
43
2015-01-02T11:59:27.000Z
2021-06-03T18:47:09.000Z
wtforms/ext/appengine/fields.py
skorokithakis/landing-page
d800decb3a36519e2dd86826f660f5fa4f62cf5c
[ "MIT" ]
1
2018-07-17T11:46:14.000Z
2018-07-17T11:46:14.000Z
wtforms/ext/appengine/fields.py
skorokithakis/landing-page
d800decb3a36519e2dd86826f660f5fa4f62cf5c
[ "MIT" ]
6
2018-07-14T04:58:02.000Z
2018-08-06T18:02:27.000Z
import decimal
import operator
import warnings

from wtforms import fields, widgets
35.745763
116
0.598151
d4eb7fe555f324704c58058f0e711c3b4fd6b7fe
3,947
py
Python
mtrainsimulator.py
trevor-wieland/MTrainAI
47bab3bf3af9e5426a822a7d14586f1798674cd7
[ "MIT" ]
null
null
null
mtrainsimulator.py
trevor-wieland/MTrainAI
47bab3bf3af9e5426a822a7d14586f1798674cd7
[ "MIT" ]
null
null
null
mtrainsimulator.py
trevor-wieland/MTrainAI
47bab3bf3af9e5426a822a7d14586f1798674cd7
[ "MIT" ]
null
null
null
import mtrain
import numpy as np
import pandas as pd
import random


def simulate_games(num_players=4, domino_size=12, num_games=250,
                   collect_data=True, debug=False,
                   players=["Random", "Greedy", "Probability", "Neural"],
                   file_name="PlayData/data4_12_250"):
    """
    Runs the mexican train game repeatedly with different combinations of
    players to generate data to be used in testing and training the neural
    net.

    If collect_data is on, the play data is retrieved and stored into a .xlsx
    file for later use. The format for the file name for this is as follows:
    PlayData/data + num_players + _ + domino_size + _ + num_games + .xlsx
    This spreadsheet is to be used when training the neural net.

    This script has no required parameters, and will run the game with the
    default params if unchanged.

    If collect_data is on, the players are selected randomly each game from:
    ["Random", "Greedy", "Probability"]
    If collect_data is off, the players are selected in order from the
    parameter players. When collect_data is off: len(players) must equal
    num_players

    Returns a tuple of lists: (score_averages, win_percentage) corresponding
    to the players
    """
    # Sets column names for building dataframe later on
    column_names = ["round_number", "turn_number", "player_number", "play",
                    "t_num", "hand", "unknown", "potential_plays", "points"]

    # Depending on mode of use, sets players and checks validity of player values
    modes = []
    if collect_data:
        modes = ["Random", "Greedy", "Probability"]
    else:
        if not len(players) == num_players:
            raise RuntimeError("len(players) must equal num_players when collect_data is off")
        modes = players

    # Simulates num_games of games
    scores = np.ndarray((num_players, num_games))
    wins = np.ndarray((num_players, num_games))
    full_data = pd.DataFrame(columns=column_names)
    current_index = 0
    for game_num in range(0, num_games):

        # Randomize players if in collect_data mode
        game_modes = []
        if collect_data:
            for select in range(0, num_players):
                game_modes.append(random.choice(modes))
        else:
            game_modes = modes

        # Run game with parameters
        results = mtrain.mexicantrain(num_players, domino_size, debug=debug,
                                      modes=game_modes,
                                      data_collection=collect_data,
                                      data_index=current_index,
                                      file_name=file_name)

        # If collecting data, data is stored into the dataframe
        if collect_data:
            current_index = results[2].index[-1] + 1
            full_data = pd.concat([full_data, results[2]])

        # Scores and wins are recorded into their respective arrays
        for player_num in range(0, num_players):
            scores[player_num, game_num] = results[0][player_num]
            if results[1] == player_num:
                wins[player_num, game_num] = 1
            else:
                wins[player_num, game_num] = 0

    # Calculates performance of the players
    score_averages = np.ndarray((num_players))
    win_percentage = np.ndarray((num_players))
    for player_num in range(0, num_players):
        score_averages[player_num] = np.mean(scores[player_num, :])
        win_percentage[player_num] = np.mean(wins[player_num, :])

    # If collecting data, prints data to a .xlsx file
    if collect_data:
        filename = "PlayData/data" + str(num_players) + "_" + str(domino_size) + "_" + str(num_games) + ".xlsx"
        writer = pd.ExcelWriter(filename)
        full_data.to_excel(writer, "Sheet1")
        writer.save()

    # Prints results and returns them as well
    if debug:
        print(score_averages)
    if debug:
        print(win_percentage)
    return score_averages, win_percentage
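# --- Added usage sketch (not part of the stored file) ---
# Two typical calls, following the docstring above: collect training data
# with the three scripted strategies, or benchmark a fixed line-up with data
# collection off. Note the first call plays 250 full games.
if __name__ == "__main__":
    scores, wins = simulate_games(num_players=4, domino_size=12, num_games=250)
    # scores, wins = simulate_games(collect_data=False,
    #                               players=["Random", "Greedy", "Probability", "Neural"])
    print(scores, wins)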
42.902174
111
0.64682
d4ec0fc927b4e34cca6fab5d967b009070fe2201
294
py
Python
dml/errors.py
RGBCube/dml
f551821545a062e15aea1f2c2444e6016748ea34
[ "MIT" ]
2
2022-03-19T19:15:28.000Z
2022-03-19T19:15:32.000Z
dml/errors.py
RGBCube/dml
f551821545a062e15aea1f2c2444e6016748ea34
[ "MIT" ]
null
null
null
dml/errors.py
RGBCube/dml
f551821545a062e15aea1f2c2444e6016748ea34
[ "MIT" ]
null
null
null
__all__ = ("DottedMarkupLanguageException", "DecodeError")
24.5
58
0.744898
d4ec2af4e9b7cc307999482d71c793953e387022
3,336
py
Python
licenseplates/dataset.py
VaranRohila/apn
dbb5b814233accbbb49b9bfe12b7162402e3b267
[ "MIT" ]
null
null
null
licenseplates/dataset.py
VaranRohila/apn
dbb5b814233accbbb49b9bfe12b7162402e3b267
[ "MIT" ]
null
null
null
licenseplates/dataset.py
VaranRohila/apn
dbb5b814233accbbb49b9bfe12b7162402e3b267
[ "MIT" ]
null
null
null
##############################################################################
#
# Below code is inspired on
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/pascal_voc.py
# --------------------------------------------------------
# Detectron2
# Licensed under the Apache 2.0 license.
# --------------------------------------------------------

from fvcore.common.file_io import PathManager
import os
import numpy as np
import xml.etree.ElementTree as ET

from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog

__all__ = ["register_licenseplates_voc"]

CLASS_NAMES = [
    "license_plate",
]


def load_voc_instances(dirname: str, split: str):
    """
    Load licenseplates VOC detection annotations to Detectron2 format.

    Args:
        dirname: Contain "annotations", "images"
        split (str): one of "train", "test"
    """
    with PathManager.open(os.path.join(dirname, split + ".txt")) as f:
        fileids = np.loadtxt(f, dtype=np.str)

    dicts = []
    for fileid in fileids:
        anno_file = os.path.join(dirname, "annotations", fileid + ".xml")
        jpeg_file = os.path.join(dirname, "images", fileid + ".jpg")

        tree = ET.parse(anno_file)

        r = {
            "file_name": jpeg_file,
            "image_id": fileid,
            "height": int(tree.findall("./size/height")[0].text),
            "width": int(tree.findall("./size/width")[0].text),
        }
        instances = []

        for obj in tree.findall("object"):
            cls = obj.find("name").text
            bbox = obj.find("bndbox")
            bbox = [float(bbox.find(x).text)
                    for x in ["xmin", "ymin", "xmax", "ymax"]]

            instances.append(
                {"category_id": CLASS_NAMES.index(cls),
                 "bbox": bbox,
                 "bbox_mode": BoxMode.XYXY_ABS}
            )
        r["annotations"] = instances
        dicts.append(r)
    return dicts


if __name__ == "__main__":
    import random
    import cv2
    from detectron2.utils.visualizer import Visualizer
    import argparse

    # Parse command line arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("--split", default="train")
    ap.add_argument("--samples", type=int, default=10)
    ap.add_argument("--scale", type=float, default=1.0)
    args = ap.parse_args()

    dataset_name = f"licenseplates_{args.split}"
    register_licenseplates_voc(dataset_name,
                               "datasets/licenseplates",
                               args.split)

    dataset_dicts = DatasetCatalog.get(dataset_name)
    for d in random.sample(dataset_dicts, args.samples):
        img = cv2.imread(d["file_name"])
        visualizer = Visualizer(img[:, :, ::-1],
                                metadata=MetadataCatalog.get(dataset_name),
                                scale=args.scale)
        vis = visualizer.draw_dataset_dict(d)
        cv2.imshow(dataset_name, vis.get_image()[:, :, ::-1])

        # Exit? Press ESC
        if cv2.waitKey(0) & 0xFF == 27:
            break
    cv2.destroyAllWindows()
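# --- Added sketch (not part of the stored file) ---
# register_licenseplates_voc is exported in __all__ and called in __main__,
# but its body is elided from this snippet. Following the detectron2
# pascal_voc registration pattern the header cites, it plausibly looks like
# this (in the original it would be defined above the __main__ guard):
def register_licenseplates_voc(name, dirname, split):
    # Register a lazy loader plus the metadata detectron2 visualizers expect.
    DatasetCatalog.register(name,
                            lambda: load_voc_instances(dirname, split))
    MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
                                  dirname=dirname,
                                  split=split)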
32.705882
100
0.579436
d4ece3f334aeba88cd76ec065663f9e04ac41d64
354
py
Python
docs/examples/pytorch/resnet50/scripts/test_read_speed.py
RogerChern/DALI
be143c3bb35458549e273608f1683a99ae41968e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
docs/examples/pytorch/resnet50/scripts/test_read_speed.py
RogerChern/DALI
be143c3bb35458549e273608f1683a99ae41968e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
docs/examples/pytorch/resnet50/scripts/test_read_speed.py
RogerChern/DALI
be143c3bb35458549e273608f1683a99ae41968e
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
import glob
import time
import random

filelist = glob.glob('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')
random.shuffle(filelist)

begin = time.time()
for i, f in enumerate(filelist):
    if i == 10000:
        break
    with open(f, "rb") as fin:
        result = fin.read()
end = time.time()

print("%.1f images/s" % (10000 / (end - begin)))
20.823529
75
0.641243
d4eced841f40608be5ce0f25f32b14e3f8c5be34
12,864
py
Python
ocellaris/solver_parts/boundary_conditions/dirichlet.py
TormodLandet/Ocellaris
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
[ "Apache-2.0" ]
1
2017-11-07T12:19:44.000Z
2017-11-07T12:19:44.000Z
ocellaris/solver_parts/boundary_conditions/dirichlet.py
TormodLandet/Ocellaris
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
[ "Apache-2.0" ]
null
null
null
ocellaris/solver_parts/boundary_conditions/dirichlet.py
TormodLandet/Ocellaris
6b4b2515fb881b1ed8d8fd8d8c23a8e1990ada58
[ "Apache-2.0" ]
2
2018-05-02T17:17:01.000Z
2019-03-11T13:09:40.000Z
# Copyright (C) 2015-2019 Tormod Landet
# SPDX-License-Identifier: Apache-2.0

import dolfin
from . import register_boundary_condition, BoundaryConditionCreator
from ocellaris.utils import (
    CodedExpression,
    OcellarisCppExpression,
    OcellarisError,
    verify_field_variable_definition,
)
38.981818
108
0.615127
d4ed66dc63c65bd461e9e3340f0322d30f2b6c89
319
py
Python
count_split_inversions/test_count_split_inversions.py
abaldwin/algorithms
8c8722394c9115c572dadcd8ab601885512fd494
[ "Apache-2.0" ]
null
null
null
count_split_inversions/test_count_split_inversions.py
abaldwin/algorithms
8c8722394c9115c572dadcd8ab601885512fd494
[ "Apache-2.0" ]
null
null
null
count_split_inversions/test_count_split_inversions.py
abaldwin/algorithms
8c8722394c9115c572dadcd8ab601885512fd494
[ "Apache-2.0" ]
null
null
null
import unittest

from count_split_inversions import count_inversions

if __name__ == '__main__':
    unittest.main()
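# --- Added sketch (not part of the stored file) ---
# The TestCase class is elided from this snippet; in the original it would
# precede the __main__ guard. A minimal example of what it might check. The
# signature is assumed (count_inversions returning the inversion count);
# [1, 3, 5, 2, 4, 6] contains exactly 3 inversions, all of them split
# inversions across the halves [1, 3, 5] and [2, 4, 6].
class TestCountSplitInversions(unittest.TestCase):
    def test_example_array(self):
        self.assertEqual(count_inversions([1, 3, 5, 2, 4, 6]), 3)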
22.785714
51
0.705329
d4ee6e97a2bc58c8bc3ccf8cb1ebf6364e70cd9d
3,906
py
Python
python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py
Forest216/BigDL
840da9a2eaf395978dd83730b02aa5e5dfbd7989
[ "Apache-2.0" ]
null
null
null
python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py
Forest216/BigDL
840da9a2eaf395978dd83730b02aa5e5dfbd7989
[ "Apache-2.0" ]
null
null
null
python/chronos/test/bigdl/chronos/forecaster/tf/test_seq2seq_keras_forecaster.py
Forest216/BigDL
840da9a2eaf395978dd83730b02aa5e5dfbd7989
[ "Apache-2.0" ]
null
null
null
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import pytest
import tempfile
import os
from unittest import TestCase

import numpy as np
import tensorflow as tf

if __name__ == '__main__':
    pytest.main([__file__])
38.294118
84
0.615463
d4eeb6ee82889a7b906d047189dd7b8bb9659a33
1,922
py
Python
examples/SubOrbitalFlight.py
nicolaikd/sl-ksp
cc1e239570e10428d11a41a26b33947b54f7f0ec
[ "MIT" ]
7
2021-01-11T15:39:56.000Z
2021-08-21T18:44:04.000Z
examples/SubOrbitalFlight.py
nicolaikd/sl-ksp
cc1e239570e10428d11a41a26b33947b54f7f0ec
[ "MIT" ]
1
2021-04-17T13:07:41.000Z
2021-04-21T16:21:35.000Z
examples/SubOrbitalFlight.py
nicolaikd/sl-ksp
cc1e239570e10428d11a41a26b33947b54f7f0ec
[ "MIT" ]
2
2021-03-17T16:36:23.000Z
2021-05-05T14:40:59.000Z
import time
import krpc

conn = krpc.connect(name='Sub-orbital flight')
vessel = conn.space_center.active_vessel

vessel.auto_pilot.target_pitch_and_heading(90, 90)
vessel.auto_pilot.engage()
vessel.control.throttle = 1
time.sleep(1)

print('Launch!')
vessel.control.activate_next_stage()

fuel_amount = conn.get_call(vessel.resources.amount, 'SolidFuel')
expr = conn.krpc.Expression.less_than(
    conn.krpc.Expression.call(fuel_amount),
    conn.krpc.Expression.constant_float(0.1))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

print('Booster separation')
vessel.control.activate_next_stage()

mean_altitude = conn.get_call(getattr, vessel.flight(), 'mean_altitude')
expr = conn.krpc.Expression.greater_than(
    conn.krpc.Expression.call(mean_altitude),
    conn.krpc.Expression.constant_double(10000))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

print('Gravity turn')
vessel.auto_pilot.target_pitch_and_heading(60, 90)

apoapsis_altitude = conn.get_call(getattr, vessel.orbit, 'apoapsis_altitude')
expr = conn.krpc.Expression.greater_than(
    conn.krpc.Expression.call(apoapsis_altitude),
    conn.krpc.Expression.constant_double(100000))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

print('Launch stage separation')
vessel.control.throttle = 0
time.sleep(1)
vessel.control.activate_next_stage()
vessel.auto_pilot.disengage()

srf_altitude = conn.get_call(getattr, vessel.flight(), 'surface_altitude')
expr = conn.krpc.Expression.less_than(
    conn.krpc.Expression.call(srf_altitude),
    conn.krpc.Expression.constant_double(1000))
event = conn.krpc.add_event(expr)
with event.condition:
    event.wait()

vessel.control.activate_next_stage()

while vessel.flight(vessel.orbit.body.reference_frame).vertical_speed < -0.1:
    print('Altitude = %.1f meters' % vessel.flight().surface_altitude)
    time.sleep(1)
print('Landed!')
30.03125
77
0.774714
d4ef39805175a5dc26b4a7a21bf430a89fe73653
321
py
Python
part02/part02-e11_rows_and_columns/src/rows_and_columns.py
davide-butera/data-analysis-with-python
78ba3d3d060ddb305bfd84b9a122409c15c47006
[ "MIT" ]
null
null
null
part02/part02-e11_rows_and_columns/src/rows_and_columns.py
davide-butera/data-analysis-with-python
78ba3d3d060ddb305bfd84b9a122409c15c47006
[ "MIT" ]
null
null
null
part02/part02-e11_rows_and_columns/src/rows_and_columns.py
davide-butera/data-analysis-with-python
78ba3d3d060ddb305bfd84b9a122409c15c47006
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

import numpy as np


if __name__ == "__main__":
    main()
16.894737
37
0.601246
d4efd4c2ab810bf4c725de159e2f410b24aea731
18,031
py
Python
ramp-database/ramp_database/tools/leaderboard.py
kegl/ramp-board
6373bf02efc096e02b26320e4f11edd00f9e5752
[ "BSD-3-Clause" ]
null
null
null
ramp-database/ramp_database/tools/leaderboard.py
kegl/ramp-board
6373bf02efc096e02b26320e4f11edd00f9e5752
[ "BSD-3-Clause" ]
null
null
null
ramp-database/ramp_database/tools/leaderboard.py
kegl/ramp-board
6373bf02efc096e02b26320e4f11edd00f9e5752
[ "BSD-3-Clause" ]
null
null
null
from distutils.version import LooseVersion
from itertools import product

import numpy as np
import pandas as pd

from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team

from .team import get_event_team_by_name

from .submission import get_bagged_scores
from .submission import get_scores
from .submission import get_submission_max_ram
from .submission import get_time

width = -1 if LooseVersion(pd.__version__) < LooseVersion("1.0.0") else None
pd.set_option('display.max_colwidth', width)


def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
                         with_links=True):
    """Format the leaderboard.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submissions : list of :class:`ramp_database.model.Submission`
        The submissions to report in the leaderboard.
    leaderboard_type : {'public', 'private'}
        The type of leaderboard to build.
    event_name : str
        The name of the event.
    with_links : bool
        Whether or not the submission name should be clickable.

    Returns
    -------
    leaderboard : dataframe
        The leaderboard in a dataframe format.
    """
    record_score = []
    event = session.query(Event).filter_by(name=event_name).one()
    map_score_precision = {score_type.name: score_type.precision
                           for score_type in event.score_types}
    for sub in submissions:
        # take only max n bag
        df_scores_bag = get_bagged_scores(session, sub.id)
        highest_level = df_scores_bag.index.get_level_values('n_bag').max()
        df_scores_bag = df_scores_bag.loc[(slice(None), highest_level), :]
        df_scores_bag.index = df_scores_bag.index.droplevel('n_bag')
        df_scores_bag = df_scores_bag.round(map_score_precision)

        df_scores = get_scores(session, sub.id)
        df_scores = df_scores.round(map_score_precision)

        df_time = get_time(session, sub.id)
        df_time = df_time.stack().to_frame()
        df_time.index = df_time.index.set_names(['fold', 'step'])
        df_time = df_time.rename(columns={0: 'time'})
        df_time = df_time.sum(axis=0, level="step").T

        df_scores_mean = df_scores.groupby('step').mean()
        df_scores_std = df_scores.groupby('step').std()
        # select only the validation and testing steps and rename them to
        # public and private
        map_renaming = {'valid': 'public', 'test': 'private'}
        df_scores_mean = (df_scores_mean.loc[list(map_renaming.keys())]
                                        .rename(index=map_renaming)
                                        .stack().to_frame().T)
        df_scores_std = (df_scores_std.loc[list(map_renaming.keys())]
                                      .rename(index=map_renaming)
                                      .stack().to_frame().T)
        df_scores_bag = (df_scores_bag.rename(index=map_renaming)
                                      .stack().to_frame().T)

        df = pd.concat([df_scores_bag, df_scores_mean, df_scores_std], axis=1,
                       keys=['bag', 'mean', 'std'])
        df.columns = df.columns.set_names(['stat', 'set', 'score'])

        # change the multi-index into a stacked index
        df.columns = df.columns.map(lambda x: " ".join(x))

        # add the aggregated time information
        df_time.index = df.index
        df_time = df_time.rename(
            columns={'train': 'train time [s]',
                     'valid': 'validation time [s]',
                     'test': 'test time [s]'}
        )
        df = pd.concat([df, df_time], axis=1)

        if leaderboard_type == 'private':
            df['submission ID'] = sub.basename.replace('submission_', '')
        df['team'] = sub.team.name
        df['submission'] = sub.name_with_link if with_links else sub.name
        df['contributivity'] = int(round(100 * sub.contributivity))
        df['historical contributivity'] = int(round(
            100 * sub.historical_contributivity))
        df['max RAM [MB]'] = get_submission_max_ram(session, sub.id)
        df['submitted at (UTC)'] = pd.Timestamp(sub.submission_timestamp)
        record_score.append(df)

    # stack all the records
    df = pd.concat(record_score, axis=0, ignore_index=True, sort=False)

    # keep only second precision for the time stamp
    df['submitted at (UTC)'] = df['submitted at (UTC)'].astype('datetime64[s]')

    # reorder the columns
    stats_order = (['bag', 'mean', 'std'] if leaderboard_type == 'private'
                   else ['bag'])
    dataset_order = (['public', 'private'] if leaderboard_type == 'private'
                     else ['public'])
    score_order = ([event.official_score_name] +
                   [score_type.name for score_type in event.score_types
                    if score_type.name != event.official_score_name])
    score_list = [
        '{} {} {}'.format(stat, dataset, score)
        for dataset, score, stat in product(dataset_order, score_order,
                                            stats_order)
    ]
    # Only display train and validation time for the public leaderboard
    time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
                 if leaderboard_type == 'private'
                 else ['train time [s]', 'validation time [s]'])
    col_ordered = (
        ['team', 'submission'] +
        score_list +
        ['contributivity', 'historical contributivity'] +
        time_list +
        ['max RAM [MB]', 'submitted at (UTC)']
    )
    if leaderboard_type == "private":
        col_ordered = ["submission ID"] + col_ordered
    df = df[col_ordered]

    # check if the contributivity columns are null
    contrib_columns = ['contributivity', 'historical contributivity']
    if (df[contrib_columns] == 0).all(axis=0).all():
        df = df.drop(columns=contrib_columns)

    df = df.sort_values(
        "bag {} {}".format(leaderboard_type, event.official_score_name),
        ascending=event.get_official_score_type(session).is_lower_the_better
    )

    # rename the column name for the public leaderboard
    if leaderboard_type == 'public':
        df = df.rename(columns={
            key: value for key, value in zip(score_list, score_order)
        })
    return df


def _compute_competition_leaderboard(session, submissions, leaderboard_type,
                                     event_name):
    """Format the competition leaderboard.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    submissions : list of :class:`ramp_database.model.Submission`
        The submissions to report in the leaderboard.
    leaderboard_type : {'public', 'private'}
        The type of leaderboard to build.
    event_name : str
        The name of the event.

    Returns
    -------
    competition_leaderboard : dataframe
        The competition leaderboard in a dataframe format.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    score_type = event.get_official_score_type(session)
    score_name = event.official_score_name

    private_leaderboard = _compute_leaderboard(session, submissions,
                                               'private', event_name,
                                               with_links=False)

    time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
                 if leaderboard_type == 'private'
                 else ['train time [s]', 'validation time [s]'])

    col_selected_private = (['team', 'submission'] +
                            ['bag private ' + score_name,
                             'bag public ' + score_name] +
                            time_list +
                            ['submitted at (UTC)'])
    leaderboard_df = private_leaderboard[col_selected_private]
    leaderboard_df = leaderboard_df.rename(
        columns={'bag private ' + score_name: 'private ' + score_name,
                 'bag public ' + score_name: 'public ' + score_name}
    )

    # select best submission for each team
    best_df = (leaderboard_df.groupby('team').min()
               if score_type.is_lower_the_better
               else leaderboard_df.groupby('team').max())
    best_df = best_df[['public ' + score_name]].reset_index()
    best_df['best'] = True

    # merge to get a best indicator column then select best
    leaderboard_df = pd.merge(
        leaderboard_df, best_df, how='left',
        left_on=['team', 'public ' + score_name],
        right_on=['team', 'public ' + score_name]
    )
    leaderboard_df = leaderboard_df.fillna(False)
    leaderboard_df = leaderboard_df[leaderboard_df['best']]
    leaderboard_df = leaderboard_df.drop(columns='best')

    # dealing with ties: we need the lowest timestamp
    best_df = leaderboard_df.groupby('team').min()
    best_df = best_df[['submitted at (UTC)']].reset_index()
    best_df['best'] = True
    leaderboard_df = pd.merge(
        leaderboard_df, best_df, how='left',
        left_on=['team', 'submitted at (UTC)'],
        right_on=['team', 'submitted at (UTC)'])
    leaderboard_df = leaderboard_df.fillna(False)
    leaderboard_df = leaderboard_df[leaderboard_df['best']]
    leaderboard_df = leaderboard_df.drop(columns='best')

    # sort by public score then by submission timestamp, compute rank
    leaderboard_df = leaderboard_df.sort_values(
        by=['public ' + score_name, 'submitted at (UTC)'],
        ascending=[score_type.is_lower_the_better, True])
    leaderboard_df['public rank'] = np.arange(len(leaderboard_df)) + 1

    # sort by private score then by submission timestamp, compute rank
    leaderboard_df = leaderboard_df.sort_values(
        by=['private ' + score_name, 'submitted at (UTC)'],
        ascending=[score_type.is_lower_the_better, True])
    leaderboard_df['private rank'] = np.arange(len(leaderboard_df)) + 1

    leaderboard_df['move'] = \
        leaderboard_df['public rank'] - leaderboard_df['private rank']
    leaderboard_df['move'] = [
        '{:+d}'.format(m) if m != 0 else '-'
        for m in leaderboard_df['move']]

    col_selected = (
        [leaderboard_type + ' rank', 'team', 'submission',
         leaderboard_type + ' ' + score_name] +
        time_list +
        ['submitted at (UTC)']
    )
    if leaderboard_type == 'private':
        col_selected.insert(1, 'move')

    df = leaderboard_df[col_selected]
    df = df.rename(columns={
        leaderboard_type + ' ' + score_name: score_name,
        leaderboard_type + ' rank': 'rank'
    })
    df = df.sort_values(by='rank')
    return df


def get_leaderboard(session, leaderboard_type, event_name, user_name=None,
                    with_links=True):
    """Get a leaderboard.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    leaderboard_type : {'public', 'private', 'failed', 'new', \
'public competition', 'private competition'}
        The type of leaderboard to generate.
    event_name : str
        The event name.
    user_name : None or str, default is None
        The user name. If None, scores from all users will be queried. This
        parameter is discarded when requesting the competition leaderboard.
    with_links : bool, default is True
        Whether or not the submission name should be clickable.

    Returns
    -------
    leaderboard : str
        The leaderboard in HTML format.
    """
    q = (session.query(Submission)
                .filter(Event.id == EventTeam.event_id)
                .filter(Team.id == EventTeam.team_id)
                .filter(EventTeam.id == Submission.event_team_id)
                .filter(Event.name == event_name))
    if user_name is not None:
        q = q.filter(Team.name == user_name)
    submissions = q.all()

    submission_filter = {'public': 'is_public_leaderboard',
                         'private': 'is_private_leaderboard',
                         'failed': 'is_error',
                         'new': 'is_new',
                         'public competition': 'is_in_competition',
                         'private competition': 'is_in_competition'}

    submissions = [sub for sub in submissions
                   if (getattr(sub, submission_filter[leaderboard_type]) and
                       sub.is_not_sandbox)]

    if not submissions:
        return None

    if leaderboard_type in ['public', 'private']:
        df = _compute_leaderboard(
            session, submissions, leaderboard_type, event_name,
            with_links=with_links
        )
    elif leaderboard_type in ['new', 'failed']:
        if leaderboard_type == 'new':
            columns = ['team', 'submission', 'submitted at (UTC)', 'state']
        else:
            columns = ['team', 'submission', 'submitted at (UTC)', 'error']

        # we rely on the zip function to ignore the submission state if the
        # error column was not appended
        data = [{
            column: value for column, value in zip(
                columns,
                [sub.event_team.team.name,
                 sub.name_with_link,
                 pd.Timestamp(sub.submission_timestamp),
                 (sub.state_with_link if leaderboard_type == 'failed'
                  else sub.state)])
        } for sub in submissions]
        df = pd.DataFrame(data, columns=columns)
    else:
        # make some extra filtering
        submissions = [sub for sub in submissions
                       if sub.is_public_leaderboard]
        if not submissions:
            return None
        competition_type = ('public' if 'public' in leaderboard_type
                            else 'private')
        df = _compute_competition_leaderboard(
            session, submissions, competition_type, event_name
        )

    df_html = df.to_html(escape=False, index=False, max_cols=None,
                         max_rows=None, justify='left')
    df_html = '<thead> {} </tbody>'.format(
        df_html.split('<thead>')[1].split('</tbody>')[0]
    )
    return df_html


def update_leaderboards(session, event_name, new_only=False):
    """Update the leaderboards for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    new_only : bool, default is False
        Whether or not to update the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    if not new_only:
        event.private_leaderboard_html = get_leaderboard(
            session, 'private', event_name
        )
        event.public_leaderboard_html_with_links = get_leaderboard(
            session, 'public', event_name
        )
        event.public_leaderboard_html_no_links = get_leaderboard(
            session, 'public', event_name, with_links=False
        )
        event.failed_leaderboard_html = get_leaderboard(
            session, 'failed', event_name
        )
        event.public_competition_leaderboard_html = get_leaderboard(
            session, 'public competition', event_name
        )
        event.private_competition_leaderboard_html = get_leaderboard(
            session, 'private competition', event_name
        )
    event.new_leaderboard_html = get_leaderboard(
        session, 'new', event_name
    )
    session.commit()


def update_user_leaderboards(session, event_name, user_name,
                             new_only=False):
    """Update the leaderboards of a user for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    user_name : str
        The user name. If None, scores from all users will be queried.
    new_only : bool, default is False
        Whether or not to update the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event_team = get_event_team_by_name(session, event_name, user_name)
    if not new_only:
        event_team.leaderboard_html = get_leaderboard(
            session, 'public', event_name, user_name
        )
        event_team.failed_leaderboard_html = get_leaderboard(
            session, 'failed', event_name, user_name
        )
    event_team.new_leaderboard_html = get_leaderboard(
        session, 'new', event_name, user_name
    )
    session.commit()


def update_all_user_leaderboards(session, event_name, new_only=False):
    """Update the leaderboards for all users for a given event.

    Parameters
    ----------
    session : :class:`sqlalchemy.orm.Session`
        The session to directly perform the operation on the database.
    event_name : str
        The event name.
    new_only : bool, default is False
        Whether or not to update the whole leaderboards or only the new
        submissions. You can turn this option to True when adding a new
        submission in the database.
    """
    event = session.query(Event).filter_by(name=event_name).one()
    event_teams = session.query(EventTeam).filter_by(event=event).all()
    for event_team in event_teams:
        user_name = event_team.team.name
        if not new_only:
            event_team.leaderboard_html = get_leaderboard(
                session, 'public', event_name, user_name
            )
            event_team.failed_leaderboard_html = get_leaderboard(
                session, 'failed', event_name, user_name
            )
        event_team.new_leaderboard_html = get_leaderboard(
            session, 'new', event_name, user_name
        )
    session.commit()
39.455142
79
0.619378
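The module above is complete, so it can be driven end to end. A minimal usage sketch, assuming a SQLAlchemy session is already configured (`make_session` and the event name are illustrative, not part of the module):

# illustrative only: make_session() stands in for the deployment's real
# ramp-database session setup; 'iris_test' is a hypothetical event name
session = make_session()
html = get_leaderboard(session, 'public', 'iris_test')
if html is not None:
    print(html)  # '<thead> ... </tbody>' fragment, ready to embed in a page
update_leaderboards(session, 'iris_test')  # refresh all cached leaderboard HTML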
d4f07209eebdfab152cf385342225e58c7210495
623
py
Python
projects/boring_stuff/03_functions/ZigZag.py
SavantLogics/Visual_Studio_Python_Scripts-master
9e3c5f8a8f685f9ae51045af9260ccc28f89d72f
[ "MIT" ]
null
null
null
projects/boring_stuff/03_functions/ZigZag.py
SavantLogics/Visual_Studio_Python_Scripts-master
9e3c5f8a8f685f9ae51045af9260ccc28f89d72f
[ "MIT" ]
null
null
null
projects/boring_stuff/03_functions/ZigZag.py
SavantLogics/Visual_Studio_Python_Scripts-master
9e3c5f8a8f685f9ae51045af9260ccc28f89d72f
[ "MIT" ]
null
null
null
#Automate the Boring Stuff with Python

import time, sys
indent = 0  # How many spaces to indent
indent_Increasing = True  # Whether the indentation is increasing or not

try:
    while True:  # The main program loop
        print(' ' * indent, end='')
        print('********')
        time.sleep(0.1)  # Pause for 1/10th of a second

        if indent_Increasing:
            indent = indent + 1
            if indent == 20:
                indent_Increasing = False
        else:
            indent = indent - 1
            if indent == 0:
                indent_Increasing = True
except KeyboardInterrupt:
    sys.exit()
27.086957
71
0.569823
d4f0759288304875f2de20fc2b91d86d509cb718
3,820
py
Python
examples/add_compensation_to_sample.py
whitews/ReFlowRESTClient
69369bbea501382291b71facea7a511ab8f7848b
[ "BSD-3-Clause" ]
null
null
null
examples/add_compensation_to_sample.py
whitews/ReFlowRESTClient
69369bbea501382291b71facea7a511ab8f7848b
[ "BSD-3-Clause" ]
null
null
null
examples/add_compensation_to_sample.py
whitews/ReFlowRESTClient
69369bbea501382291b71facea7a511ab8f7848b
[ "BSD-3-Clause" ]
null
null
null
import getpass
import sys
import json

from reflowrestclient.utils import *

host = raw_input('Host: ')
username = raw_input('Username: ')
password = getpass.getpass('Password: ')

token = get_token(host, username, password)
if token:
    print "Authentication successful"
    print '=' * 40
else:
    print "No token for you!!!"
    sys.exit()


while True:
    start()
28.939394
100
0.625654
d4f12c3a663d3edb5021b78314c1afd940fc7b1a
412
py
Python
accountifie/toolkit/urls.py
imcallister/accountifie
094834c9d632e0353e3baf8d924eeb10cba0add4
[ "MIT", "Unlicense" ]
4
2017-06-02T08:48:48.000Z
2021-11-21T23:57:15.000Z
accountifie/toolkit/urls.py
imcallister/accountifie
094834c9d632e0353e3baf8d924eeb10cba0add4
[ "MIT", "Unlicense" ]
3
2020-06-05T16:55:42.000Z
2021-06-10T17:43:12.000Z
accountifie/toolkit/urls.py
imcallister/accountifie
094834c9d632e0353e3baf8d924eeb10cba0add4
[ "MIT", "Unlicense" ]
4
2015-12-15T14:27:51.000Z
2017-04-21T21:42:27.000Z
from django.conf import settings
from django.conf.urls import url, static

from . import views
from . import jobs


urlpatterns = [
    url(r'^choose_company/(?P<company_id>.*)/$', views.choose_company, name='choose_company'),
    url(r'^cleanlogs/$', jobs.cleanlogs, name='cleanlogs'),
    url(r'^primecache/$', jobs.primecache, name='primecache'),
    url(r'^dump_fixtures/$', views.dump_fixtures),
]
27.466667
98
0.686893
d4f145e4f5e9df82c3ed3f3cc3dee6abaad4fc6c
838
py
Python
setup.py
sequentialchaos/i3-workspace-swap
86646066b9f971c1ff130a642a914ab2db8f9ae6
[ "MIT" ]
null
null
null
setup.py
sequentialchaos/i3-workspace-swap
86646066b9f971c1ff130a642a914ab2db8f9ae6
[ "MIT" ]
null
null
null
setup.py
sequentialchaos/i3-workspace-swap
86646066b9f971c1ff130a642a914ab2db8f9ae6
[ "MIT" ]
null
null
null
import setuptools

with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="i3-workspace-swap",
    description='A python utility swap the content of two workplaces in i3wm',
    long_description=long_description,
    long_description_content_type="text/markdown",
    version="1.1.0",
    url='https://github.com/einzigartigername/i3-workspace-swap',
    license='MIT',
    author='Nelson Gillo',
    author_email='[email protected]',
    packages=setuptools.find_packages(),
    scripts=['i3-workspace-swap'],
    install_requires=['i3ipc'],
    classifiers=[
        "Intended Audience :: End Users/Desktop",
        "License :: OSI Approved :: MIT License",
        "Operating System :: POSIX :: Linux",
        'Programming Language :: Python :: 3'
    ],
    python_requires='>=3.6',
)
27.032258
78
0.658711
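For a setuptools file like this one, the standard build and install commands apply (generic pip/setuptools usage, nothing repo-specific):

# install the package and its i3ipc dependency into the current environment
python -m pip install .
# or build sdist/wheel artifacts (requires the separate 'build' package)
python -m build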
d4f1aa99ca10cb206e4f7702a9c7de6f3d6dfd4e
5,975
py
Python
intersight/models/niaapi_version_regex_all_of.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
21
2018-03-29T14:20:35.000Z
2021-10-13T05:11:41.000Z
intersight/models/niaapi_version_regex_all_of.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
14
2018-01-30T15:45:46.000Z
2022-02-23T14:23:21.000Z
intersight/models/niaapi_version_regex_all_of.py
sdnit-se/intersight-python
551f7685c0f76bb8af60ec83ffb6f9672d49a4ae
[ "Apache-2.0" ]
18
2018-01-03T15:09:56.000Z
2021-07-16T02:21:54.000Z
# coding: utf-8

"""
    Cisco Intersight

    Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations.  # noqa: E501

    The version of the OpenAPI document: 1.0.9-1295
    Contact: [email protected]
    Generated by: https://openapi-generator.tech
"""

import pprint
import re  # noqa: F401

import six

from intersight.configuration import Configuration


def to_str(self):
    """Returns the string representation of the model"""
    return pprint.pformat(self.to_dict())


def __repr__(self):
    """For `print` and `pprint`"""
    return self.to_str()


def __eq__(self, other):
    """Returns true if both objects are equal"""
    if not isinstance(other, NiaapiVersionRegexAllOf):
        return False

    return self.to_dict() == other.to_dict()


def __ne__(self, other):
    """Returns true if both objects are not equal"""
    if not isinstance(other, NiaapiVersionRegexAllOf):
        return True

    return self.to_dict() != other.to_dict()
34.738372
1,052
0.627113
d4f20508bec1fb3b3210c9cb30a6481120876c56
2,158
py
Python
ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py
genemerewether/fprime
fcdd071b5ddffe54ade098ca5d451903daba9eed
[ "Apache-2.0" ]
5
2019-10-22T03:41:02.000Z
2022-01-16T12:48:31.000Z
ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py
genemerewether/fprime
fcdd071b5ddffe54ade098ca5d451903daba9eed
[ "Apache-2.0" ]
27
2019-02-07T17:58:58.000Z
2019-08-13T00:46:24.000Z
ROS/fprime_ws/src/genfprime/src/genfprime/generate_modmk.py
genemerewether/fprime
fcdd071b5ddffe54ade098ca5d451903daba9eed
[ "Apache-2.0" ]
3
2019-01-01T18:44:37.000Z
2019-08-01T01:19:39.000Z
#
# Copyright 2004-2016, by the California Institute of Technology.
# ALL RIGHTS RESERVED. United States Government Sponsorship
# acknowledged. Any commercial use must be negotiated with the Office
# of Technology Transfer at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws and
# regulations. By accepting this document, the user agrees to comply
# with all U.S. export laws and regulations. User has the
# responsibility to obtain export licenses, or other export authority
# as may be required before exporting such information to foreign
# countries or providing access to foreign persons.
#

from __future__ import print_function

import os

from genmsg import MsgGenerationException
#from . name import *

## :param type_name outdir: Full path to output directory
## :returns int: status. 0 if successful
37.206897
96
0.698332
d4f37664ce2a24dbc73824c236ef48b007de021a
6,681
py
Python
tests/test_compare.py
fool65c/jupytext
4b55d2e6ccc995c04679de0863234c60c3741a69
[ "MIT" ]
1
2019-05-06T07:39:15.000Z
2019-05-06T07:39:15.000Z
tests/test_compare.py
royalosyin/jupytext
72aa6c4968da714323fbd7a7c548ee4b1274c946
[ "MIT" ]
null
null
null
tests/test_compare.py
royalosyin/jupytext
72aa6c4968da714323fbd7a7c548ee4b1274c946
[ "MIT" ]
null
null
null
import pytest
from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell

from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion


def test_raise_on_incomplete_markdown_cell():
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md')


def test_does_raise_on_split_markdown_cell():
    ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
    test = new_notebook(cells=[new_markdown_cell('Cell one'),
                               new_markdown_cell('second line')])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'md')


def test_raise_on_different_cell_metadata():
    ref = new_notebook(cells=[new_code_cell('1+1')])
    test = new_notebook(cells=[new_code_cell('1+1', metadata={'metakey': 'value'})])
    with pytest.raises(NotebookDifference):
        compare_notebooks(ref, test, 'py:light')
41.75625
119
0.658135
d4f450e40179e22e5b7878cbc391794da9f23b06
14,026
py
Python
Cogs/Actions.py
MrAngelDo6pa/MedBotS
89e19d831507e20d0898114502967b2ad8ecf957
[ "MIT" ]
2
2021-09-28T10:40:10.000Z
2021-11-07T14:49:07.000Z
Cogs/Actions.py
ddoskid/lol12
35c097bbebeca3043a939b902b07474473344a3c
[ "MIT" ]
null
null
null
Cogs/Actions.py
ddoskid/lol12
35c097bbebeca3043a939b902b07474473344a3c
[ "MIT" ]
null
null
null
import asyncio
import discord
import random
import datetime
from discord.ext import commands
from Cogs import DisplayName
from Cogs import Nullify
51.566176
138
0.654855
d4f46e1bb0a2bc679bb20e6fc52d23194cb01643
7,830
py
Python
marltoolbox/examples/tune_function_api/lola_pg_official.py
tobiasbaumann1/amd
cb6190be92dea54db04ef9202d381b96f6f6218b
[ "MIT" ]
null
null
null
marltoolbox/examples/tune_function_api/lola_pg_official.py
tobiasbaumann1/amd
cb6190be92dea54db04ef9202d381b96f6f6218b
[ "MIT" ]
null
null
null
marltoolbox/examples/tune_function_api/lola_pg_official.py
tobiasbaumann1/amd
cb6190be92dea54db04ef9202d381b96f6f6218b
[ "MIT" ]
null
null
null
##########
# Additional dependencies are needed:
# Follow the LOLA installation described in the tune_class_api/lola_pg_official.py file
##########

import os

import ray
from ray import tune

import marltoolbox.algos.lola.envs as lola_envs
import marltoolbox.algos.lola_dice.envs as lola_dice_envs
from marltoolbox.algos.lola import train_cg, train_exact, train_pg
from marltoolbox.envs.vectorized_coin_game import CoinGame, AsymCoinGame
from marltoolbox.utils import log


if __name__ == "__main__":
    debug_mode = True
    main(debug_mode)
37.826087
116
0.577395
d4f523ec6d8e4a47a69a4a400a7f08b9647af175
1,154
py
Python
src/cut_link/utils.py
true7/srt
d5accd411e73ade4ed40a41759e95cb20fbda98d
[ "MIT" ]
null
null
null
src/cut_link/utils.py
true7/srt
d5accd411e73ade4ed40a41759e95cb20fbda98d
[ "MIT" ]
null
null
null
src/cut_link/utils.py
true7/srt
d5accd411e73ade4ed40a41759e95cb20fbda98d
[ "MIT" ]
null
null
null
import string
import random
import json
from calendar import month_name

from django.conf import settings

SHORTLINK_MIN = getattr(settings, "SHORTLINK_MIN", 6)


def json_data_func(instance):
    '''
    Return json format data, ready for passing into AmCharts.
    Contains 2 items - name of the month and count of distinct links,
    which were cut on the website.
    '''
    class_ = instance.__class__
    # FIXME. The problem is every next year it will add results above
    result = []
    for month in range(1, len(month_name)):
        count_use = class_.objects.filter(pub_date__month=month).count()
        data = dict(month=month_name[month], count=count_use)
        result.append(data)
    json_data = json.dumps(result)
    return json_data
27.47619
72
0.710572
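The JSON produced by json_data_func is a list of month/count pairs, one entry per calendar month. A sketch of the expected shape (counts invented):

# json_data_func(link_instance) ->
# '[{"month": "January", "count": 3}, {"month": "February", "count": 0},
#   ..., {"month": "December", "count": 7}]'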
d4f583072901ee0ab94c10d93e238c7f33bf30a3
4,745
py
Python
lib/tool_shed/scripts/bootstrap_tool_shed/bootstrap_util.py
blankenberg/galaxy-data-resource
ca32a1aafd64948f489a4e5cf88096f32391b1d9
[ "CC-BY-3.0" ]
null
null
null
lib/tool_shed/scripts/bootstrap_tool_shed/bootstrap_util.py
blankenberg/galaxy-data-resource
ca32a1aafd64948f489a4e5cf88096f32391b1d9
[ "CC-BY-3.0" ]
1
2015-02-21T18:48:19.000Z
2015-02-27T15:50:32.000Z
lib/tool_shed/scripts/bootstrap_tool_shed/bootstrap_util.py
blankenberg/galaxy-data-resource
ca32a1aafd64948f489a4e5cf88096f32391b1d9
[ "CC-BY-3.0" ]
3
2015-02-22T13:34:16.000Z
2020-10-01T01:28:04.000Z
#!/usr/bin/python
import argparse
import ConfigParser
import os
import sys

new_path = [ os.path.join( os.getcwd(), "lib" ) ]
new_path.extend( sys.path[1:] )
sys.path = new_path

from galaxy import eggs
eggs.require( "SQLAlchemy >= 0.4" )

import galaxy.webapps.tool_shed.model.mapping as tool_shed_model

from sqlalchemy.exc import ProgrammingError
from sqlalchemy.exc import OperationalError

from tool_shed.util import xml_util

parser = argparse.ArgumentParser()
parser.add_argument( '-c', '--config_file', dest='config', action='store', default='config/tool_shed.ini.sample' )
parser.add_argument( '-e', '--execute', dest='method', action='store', default='check_db' )
args = parser.parse_args()


if __name__ == '__main__':
    exit( main( args ) )
36.221374
140
0.641307
d4f5c78a68ce3ab44360536293de688747eefa47
1,327
py
Python
moto/dynamodbstreams/responses.py
jonnangle/moto-1
40b4e299abb732aad7f56cc0f680c0a272a46594
[ "Apache-2.0" ]
3
2020-08-04T20:29:41.000Z
2020-11-09T09:28:19.000Z
moto/dynamodbstreams/responses.py
jonnangle/moto-1
40b4e299abb732aad7f56cc0f680c0a272a46594
[ "Apache-2.0" ]
17
2020-08-28T12:53:56.000Z
2020-11-10T01:04:46.000Z
moto/dynamodbstreams/responses.py
jonnangle/moto-1
40b4e299abb732aad7f56cc0f680c0a272a46594
[ "Apache-2.0" ]
2
2017-03-02T05:59:52.000Z
2020-09-03T13:25:44.000Z
from __future__ import unicode_literals

from moto.core.responses import BaseResponse
from .models import dynamodbstreams_backends
from six import string_types
32.365854
75
0.699322
d4f6462a075ffe065a5c5d813a1e145ed305cf7d
962
py
Python
tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py
ytorzuk-altran/openvino
68d460a3bb578a738ba0e4d0e1f2e321afa73ab0
[ "Apache-2.0" ]
1
2021-04-20T08:14:51.000Z
2021-04-20T08:14:51.000Z
tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py
ytorzuk-altran/openvino
68d460a3bb578a738ba0e4d0e1f2e321afa73ab0
[ "Apache-2.0" ]
55
2020-11-16T09:55:29.000Z
2022-03-28T13:18:15.000Z
tools/mo/openvino/tools/mo/front/mxnet/zeros_ext.py
ytorzuk-altran/openvino
68d460a3bb578a738ba0e4d0e1f2e321afa73ab0
[ "Apache-2.0" ]
1
2021-02-15T01:13:57.000Z
2021-02-15T01:13:57.000Z
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import numpy as np

from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.mxnet.extractors.utils import get_mxnet_layer_attrs
from openvino.tools.mo.ops.const import Const
28.294118
80
0.637214
d4f6be38b9352af5e1c2c173a5437dc5e5702e4d
4,359
py
Python
tools/jslib_builder.py
Jumpscale/jumpscale_portal8
3a4d56a1ba985b68fe9b525aed2486a54808332f
[ "Apache-2.0" ]
null
null
null
tools/jslib_builder.py
Jumpscale/jumpscale_portal8
3a4d56a1ba985b68fe9b525aed2486a54808332f
[ "Apache-2.0" ]
74
2015-12-28T16:17:20.000Z
2021-09-08T12:28:59.000Z
tools/jslib_builder.py
Jumpscale/jumpscale_portal8
3a4d56a1ba985b68fe9b525aed2486a54808332f
[ "Apache-2.0" ]
null
null
null
from JumpScale import j

b = builder()
b.do()
36.630252
175
0.566873
d4f6ca3a52378c092fed2c8021d1ffb5c3d7441c
882
py
Python
SimpleSimulator/samuelator.py
Anindya-Prithvi/CO_M21_Assignment
524bd2b866dd58a6358354cda65e2136ecd46e50
[ "Apache-2.0" ]
3
2021-09-11T05:58:46.000Z
2021-12-21T14:03:20.000Z
SimpleSimulator/samuelator.py
sc0rp10n-py/CO_M21_Assignment
524bd2b866dd58a6358354cda65e2136ecd46e50
[ "Apache-2.0" ]
null
null
null
SimpleSimulator/samuelator.py
sc0rp10n-py/CO_M21_Assignment
524bd2b866dd58a6358354cda65e2136ecd46e50
[ "Apache-2.0" ]
3
2021-09-05T12:55:38.000Z
2022-03-18T02:51:29.000Z
import sys
import warnings

import matplotlib.pyplot as plt

from parsets import IMACC, IMG, PROGC, REGFLPC, ExecE, plot

warnings.filterwarnings("ignore")

MEM = IMACC(sys.stdin.read())  # Load memory from stdin
PC = PROGC(0)  # Start from the first instruction
RF = REGFLPC()  # initialize register and flags
EE = ExecE(MEM)
IM = IMG()
halted = False
cycle = 0
if MEM.inst_mem == ["0" * 16 for i in range(256)]:
    halted = True
while not halted:
    Instruction = MEM.getData(PC)  # Get current instruction
    IM.imgx.append(cycle)
    IM.imgy.append(PC.PC)
    # Update RF, compute new_PC
    halted, new_PC, new_regs = EE.execute(Instruction, RF.asdct(), IM, cycle)
    RF.update(new_regs, new_PC)
    PC.dump()  # Print PC
    RF.dump()  # Print RF state
    PC.update(new_PC)  # Update PC
    cycle += 1
MEM.dump()  # Print memory state

# plotting
plot(plt, IM)
22.615385
77
0.672336
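Because the simulator reads the whole program image from stdin, a typical invocation redirects a machine-code file into the script (file name illustrative):

# feed assembled machine code on stdin; the script prints the PC/register
# trace and memory dump, then plots PC against cycle count
# python samuelator.py < machine_code.txt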
d4f722d8fa5429ebec246908bcfdfc1e45bff80b
5,884
py
Python
utils/converters.py
LiReNa00/JDBot
c85b31e272d5394ba5debc26b8b5357fb9d3d844
[ "MIT" ]
null
null
null
utils/converters.py
LiReNa00/JDBot
c85b31e272d5394ba5debc26b8b5357fb9d3d844
[ "MIT" ]
null
null
null
utils/converters.py
LiReNa00/JDBot
c85b31e272d5394ba5debc26b8b5357fb9d3d844
[ "MIT" ]
null
null
null
import discord
import re
import emoji
import contextlib
import typing
import datetime

from discord.ext import commands
from discord.http import Route


def generate_snowflake(dt: typing.Optional[datetime.datetime] = None) -> int:
    """Returns a numeric snowflake pretending to be created at the given date but more accurate and random than time_snowflake.
    If dt is not passed, it makes one from the current time using utcnow.

    Parameters
    -----------
    dt: :class:`datetime.datetime`
        A datetime object to convert to a snowflake.
        If naive, the timezone is assumed to be local time.

    Returns
    --------
    :class:`int`
        The snowflake representing the time given.
    """
    dt = dt or discord.utils.utcnow()
    return int(dt.timestamp() * 1000 - 1420070400000) << 22 | 0x3FFFFF


# remove if edpy adds my pull request into the master.
31.132275
127
0.593644
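The bit layout in generate_snowflake mirrors Discord's snowflake format: milliseconds since the Discord epoch in the top 42 bits, with the low 22 bits saturated. A quick stdlib-only check of that layout (independent of discord.py):

import datetime

DISCORD_EPOCH_MS = 1420070400000  # 2015-01-01T00:00:00 UTC, in milliseconds

dt = datetime.datetime(2021, 1, 1, tzinfo=datetime.timezone.utc)
snowflake = int(dt.timestamp() * 1000 - DISCORD_EPOCH_MS) << 22 | 0x3FFFFF
# the creation time is recoverable from the upper bits
assert (snowflake >> 22) + DISCORD_EPOCH_MS == int(dt.timestamp() * 1000)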
d4f79ba15482dc239d99373d27359b1da32e98ba
1,172
py
Python
kissim/cli/encode.py
AJK-dev/kissim
15375000d47b5d5485322fc725809f853a3659de
[ "MIT" ]
15
2020-06-23T14:46:07.000Z
2022-02-03T04:23:56.000Z
kissim/cli/encode.py
volkamerlab/kissim
35198a5efd4b651dd3952bf26ac5098fd1c4dfaa
[ "MIT" ]
66
2020-11-05T11:45:21.000Z
2021-12-15T12:11:20.000Z
kissim/cli/encode.py
AJK-dev/kissim
15375000d47b5d5485322fc725809f853a3659de
[ "MIT" ]
3
2021-02-27T12:56:27.000Z
2022-02-03T04:23:57.000Z
""" kissim.cli.encode Encode structures (generate fingerprints) from CLI arguments. """ import numpy as np from kissim.api import encode from kissim.cli.utils import configure_logger def encode_from_cli(args): """ Encode structures. Parameters ---------- args : argsparse.Namespace CLI arguments. """ configure_logger(args.output) structure_klifs_ids = _parse_structure_klifs_ids(args.input) encode(structure_klifs_ids, args.output, args.local, args.ncores) def _parse_structure_klifs_ids(args_input): """ Parse structure KLIFS IDs. Parameters ---------- args_input : list of str Either path to txt file with structure KLIFS ID (one ID per row) or one or more structure KLIFS IDs. Returns ------- list of int List of structure KLIFS IDs. """ if len(args_input) == 1: try: structure_klifs_ids = [int(args_input[0])] except ValueError: structure_klifs_ids = np.genfromtxt(fname=args_input[0], dtype=int).tolist() else: structure_klifs_ids = [int(i) for i in args_input] return structure_klifs_ids
22.113208
97
0.654437
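_parse_structure_klifs_ids accepts three input shapes; a sketch of each (IDs invented):

_parse_structure_klifs_ids(["3833"])           # single ID            -> [3833]
_parse_structure_klifs_ids(["3833", "1086"])   # several IDs          -> [3833, 1086]
_parse_structure_klifs_ids(["ids.txt"])        # file, one ID per row -> list of ints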
d4f91839d0ba937bffd97ff3a607f1dad1fc55ad
1,690
py
Python
distanceProfile.py
ZiyaoWei/pyMatrixProfile
1c88e1558e2bc5210d328d253572f5ff7fab1a5e
[ "MIT" ]
29
2017-08-13T04:24:16.000Z
2021-12-24T07:51:08.000Z
Matrix Profile/Implementation/pyMatrixProfile-master/distanceProfile.py
rakesh-lagare/Thesis_Work
733285eae31a3fd8b613ec30d9e2ab9befd57614
[ "Apache-2.0" ]
2
2018-02-12T11:58:53.000Z
2018-08-20T19:51:47.000Z
Matrix Profile/Implementation/pyMatrixProfile-master/distanceProfile.py
rakesh-lagare/Thesis_Work
733285eae31a3fd8b613ec30d9e2ab9befd57614
[ "Apache-2.0" ]
15
2017-08-19T23:16:45.000Z
2019-09-21T04:53:43.000Z
import numpy as np

from util import *


def naiveDistanceProfile(tsA, idx, m, tsB = None):
    """Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.

    >>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
    array([[ 2.   ,  2.828,  2.   ],
           [ 0.   ,  0.   ,  0.   ]])
    """
    selfJoin = False
    if tsB is None:
        selfJoin = True
        tsB = tsA
    query = tsA[idx : (idx + m)]
    distanceProfile = []
    n = len(tsB)
    for i in range(n - m + 1):
        distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
    distanceProfile = np.array(distanceProfile)
    if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
    return (distanceProfile, np.full(n - m + 1, idx, dtype = float))


def stampDistanceProfile(tsA, idx, m, tsB = None):
    """
    >>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
    array([[ 2.   ,  2.828,  2.   ],
           [ 0.   ,  0.   ,  0.   ]])
    """
    selfJoin = False
    if tsB is None:
        selfJoin = True
        tsB = tsA
    query = tsA[idx : (idx + m)]
    n = len(tsB)
    distanceProfile = mass(query, tsB)
    if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
    return (distanceProfile, np.full(n - m + 1, idx, dtype = float))


if __name__ == "__main__":
    import doctest
    doctest.testmod()
34.489796
112
0.56213
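Both profiles lean on zNormalizedEuclideanDistance and mass from util, which this record does not include. A minimal sketch of the z-normalized distance under its usual definition (an assumption, not the repo's actual implementation):

import numpy as np

def z_normalized_euclidean_distance(a, b):
    # z-normalize each window, then take the plain Euclidean distance
    a = (a - np.mean(a)) / np.std(a)
    b = (b - np.mean(b)) / np.std(b)
    return np.linalg.norm(a - b)

# reproduces the first doctest value above:
# query [0, 1, -1, 0] vs window [-1, 1, 0, 0] -> 2.0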
d4f945809a73eb22e79d64ed4418fcf53a6bccb9
73
py
Python
test_0000.py
theo-dim/cash-gels-thesis
de8c1b20f766aa1c58d8f692373c76683d165a66
[ "MIT" ]
null
null
null
test_0000.py
theo-dim/cash-gels-thesis
de8c1b20f766aa1c58d8f692373c76683d165a66
[ "MIT" ]
null
null
null
test_0000.py
theo-dim/cash-gels-thesis
de8c1b20f766aa1c58d8f692373c76683d165a66
[ "MIT" ]
null
null
null
import pyplot as plt import numpy as np from sklearn import linear_model
18.25
32
0.835616
d4fae683109b51c37a205d6ed228be7bbb86f029
7,868
py
Python
vnTrader/uiMainWindow.py
bttt123/TradeSim
2374b0925d34d8fb299095250c5c8834192848ce
[ "Apache-2.0" ]
null
null
null
vnTrader/uiMainWindow.py
bttt123/TradeSim
2374b0925d34d8fb299095250c5c8834192848ce
[ "Apache-2.0" ]
null
null
null
vnTrader/uiMainWindow.py
bttt123/TradeSim
2374b0925d34d8fb299095250c5c8834192848ce
[ "Apache-2.0" ]
1
2022-03-29T21:57:31.000Z
2022-03-29T21:57:31.000Z
# encoding: UTF-8

from builtins import str

import psutil
# import sys

# PyQt 4/5 compatibility
try:
    from PyQt4.QtGui import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
    from PyQt4 import QtCore
except ImportError:
    from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
    from PyQt5 import QtCore

from uiBasicWidget import *
import uiBasicWidget as wgs
#from . import uiBasicWidget as wgs


########################################################################

########################################################################
35.441441
121
0.521734
d4fb4e3677b230700c8377c0c0d538eea2ac4e41
9,431
py
Python
line_notify_core.py
ficgra/PChome-alertor
5f4e798e3130c170eb75e03215128590ed02dcf9
[ "Apache-2.0" ]
1
2021-06-16T00:36:22.000Z
2021-06-16T00:36:22.000Z
line_notify_core.py
ficgra/PChome-alertor
5f4e798e3130c170eb75e03215128590ed02dcf9
[ "Apache-2.0" ]
null
null
null
line_notify_core.py
ficgra/PChome-alertor
5f4e798e3130c170eb75e03215128590ed02dcf9
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import requests
import json
import re
from flask import Flask, request, abort
import mysql.connector as mariadb
from mysql.connector import Error
from linebot import (
    LineBotApi, WebhookHandler
)
from linebot.exceptions import (
    InvalidSignatureError
)
from linebot.models import (
    MessageEvent, TextMessage, TextSendMessage, FollowEvent,
)

app = Flask(__name__)
line_bot_api = LineBotApi('')
handler = WebhookHandler('')

#line /callbackEvent
#lineEvent
#notifypost/register
#
#codenotify-bot postaccess_token
def get_token(code):
    headers = {
        "Content-Type": "application/x-www-form-urlencoded"
    }
    params = {
        "grant_type": "authorization_code",
        "code": code,
        "redirect_uri": "https://line.husan.cc/register",  # host_ip
        "client_id": "client_id",  # notify client_id
        "client_secret": "client_secret"  # notify client_secret
    }
    r = requests.post('https://notify-bot.line.me/oauth/token', headers=headers, params=params)
    source = json.loads(r.text)
    access_token = source['access_token']
    return access_token

#notify
#
#
#
#notify_access_token
#


if __name__ == "__main__":
    app.run('0.0.0.0', port=3000)
35.190299
214
0.626021
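Once get_token has exchanged the OAuth code for an access token, messages are pushed through LINE's Notify API. A minimal sketch (endpoint and Bearer header follow LINE's published Notify API; the function name is illustrative):

def send_notify(access_token, message):
    # push a text message using the token returned by get_token()
    headers = {"Authorization": "Bearer " + access_token}
    params = {"message": message}
    r = requests.post('https://notify-api.line.me/api/notify',
                      headers=headers, params=params)
    return r.status_code  # 200 on success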
d4fd04698f7477aacd1d458ba68e94970c4579ef
1,143
py
Python
sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py
MachineLP/SFC_models
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
[ "Apache-2.0" ]
21
2016-11-03T12:30:50.000Z
2022-03-24T06:54:14.000Z
sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py
MachineLP/SFC_models
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
[ "Apache-2.0" ]
1
2019-04-02T02:01:27.000Z
2019-04-07T21:07:10.000Z
sfc_models/examples/scripts/intro_X_XX_sim_multiplier.py
MachineLP/SFC_models
d438a4e3e88534a206c761cda7a3f6a58ac3a0ac
[ "Apache-2.0" ]
12
2016-11-03T12:30:57.000Z
2021-09-14T23:08:23.000Z
# coding=utf-8
from sfc_models.objects import *
from sfc_models.examples.Quick2DPlot import Quick2DPlot

register_standard_logs('output', __file__)
mod = Model()
country = Country(mod, 'CO')
Household(country, 'HH')
ConsolidatedGovernment(country, 'GOV')
FixedMarginBusiness(country, 'BUS', profit_margin=.025)
Market(country, 'GOOD')
Market(country, 'LAB')
TaxFlow(country, 'TAX', taxrate=.2)
# At time period 25, cut spending to 17 (from 20)
mod.AddExogenous('GOV', 'DEM_GOOD', [20.,]* 25 + [17.,]*20)
mod.AddGlobalEquation('DEBT_GDP', 'DEBT-TO-GDP RATIO', '-100.*GOV__F/BUS__SUP_GOOD')
mod.AddGlobalEquation('DEFICIT', 'DEFICIT', '-1.*GOV__INC')
mod.EquationSolver.MaxTime = 40
mod.main()

k = mod.GetTimeSeries('k')
Rat = mod.GetTimeSeries('DEBT_GDP')
Def = mod.GetTimeSeries('GOV__INC')
spend = mod.GetTimeSeries('GOV__DEM_GOOD')

p = Quick2DPlot([k, k], [spend, Def], title='Spending and Deficit',
                filename='intro_X_XX_multiplier_deficit.png', run_now=False)
p.Legend = ['G', 'Deficit']
p.LegendPos = 'center left'
p.DoPlot()
Quick2DPlot(k, Rat, title='Debt-to-GDP Ratio', filename='intro_X_XX_multiplier_debt_gdp.png')
34.636364
113
0.727909
d4fe0f781e9f3139abc2757c5c86104cc2181049
4,135
py
Python
auth_framework/settings.py
DrChai/django-auth-framework
4f9a108de66fe102ff28518b6597ad26b5855518
[ "BSD-2-Clause" ]
null
null
null
auth_framework/settings.py
DrChai/django-auth-framework
4f9a108de66fe102ff28518b6597ad26b5855518
[ "BSD-2-Clause" ]
null
null
null
auth_framework/settings.py
DrChai/django-auth-framework
4f9a108de66fe102ff28518b6597ad26b5855518
[ "BSD-2-Clause" ]
null
null
null
from importlib import import_module

from django.conf import settings
from django.core.signals import setting_changed

SOCIALACCOUNT_MODEL = getattr(settings, "REST_AUTH_SOCIALACCOUNT_MODEL", "auth_framework.SocialAccount")

DEFAULTS = {
    'UNIQUE_EMAIL': True,
    'RESET_PASSWORD_BY': 'pin',  # 'url' | 'pin'
    'SERIALIZERS': {
        # 'SOCIAL_LOGIN_SERIALIZER': 'auth.social.serializers.DefaultSocialLoginSerializer',
        'SIGNUP_SERIALIZER': 'auth_framework.serializers.signup_serializers.DefaultSignUpSerializer',
        'USERINFO_SERIALIZER': None
    },
    'SOCIALACCOUNT_MODEL': SOCIALACCOUNT_MODEL,
    'SOCIALACCOUNT_ADMIN_CLASS': "auth_framework.admin.SocialAccountAdmin",
    # SOCIAL LOGINS
    'SOCIAL_CALLBACK_URL': None,  # eg: 'https://developers.google.com/oauthplayground'
    'SOCIAL_AUTO_SIGNUP': False,
    # SIGN UP
    # 'SIGNUP_EMAIL_VERIFICATION': 'none',  # trimmed out email verification celery task in closed source. fewer usage
    'SIGNUP_USERNAME_REQUIRED': False,
    'SIGNUP_USERNAME_VALIDATORS': [],
    'USE_PASSWORD_TWICE_VALIDATION': True,
    # ADVANCES
    'USE_PHONENUMBER_FIELD': False,
    'USE_CELERY_EMAIL': False,
    'USE_ID_TOKEN': True,
    'OAUTH_SAVE_ID_TOKEN': False
}


app_settings = AuthSettings(None, DEFAULTS)

setting_changed.connect(reload_app_settings)
33.08
117
0.641112
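The AuthSettings wrapper and reload_app_settings handler are elided from this record; judging by the name and by DRF's APISettings pattern, downstream code presumably reads settings attribute-style. An illustrative (assumed) usage:

from auth_framework.settings import app_settings

# assumed attribute-style access backed by DEFAULTS above
reset_mode = app_settings.RESET_PASSWORD_BY  # 'pin' unless overridden in Django settings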
d4ff76335b31237c5497fc74cfffe7b1e1ab18a8
317
py
Python
shorty/models.py
gkiserpong/shorty
5795e26f3221d581223e37353bee360454532211
[ "MIT" ]
null
null
null
shorty/models.py
gkiserpong/shorty
5795e26f3221d581223e37353bee360454532211
[ "MIT" ]
null
null
null
shorty/models.py
gkiserpong/shorty
5795e26f3221d581223e37353bee360454532211
[ "MIT" ]
null
null
null
from django.db import models

from shorty.manager import UrlManager
22.642857
58
0.684543
be0006e92a529db72d1a914a113e9040dbe56c1e
48,343
py
Python
test/sec_full.py
time-track-tool/time-track-tool
a1c280f32a7766e460c862633b748fa206256f24
[ "MIT" ]
null
null
null
test/sec_full.py
time-track-tool/time-track-tool
a1c280f32a7766e460c862633b748fa206256f24
[ "MIT" ]
1
2019-07-03T13:32:38.000Z
2019-07-03T13:32:38.000Z
test/sec_full.py
time-track-tool/time-track-tool
a1c280f32a7766e460c862633b748fa206256f24
[ "MIT" ]
1
2019-05-15T16:01:31.000Z
2019-05-15T16:01:31.000Z
security = """ New Web users get the Roles "User,Nosy" New Email users get the Role "User" Role "admin": User may access the rest interface (Rest Access) User may access the web interface (Web Access) User may access the xmlrpc interface (Xmlrpc Access) User may create everything (Create) User may edit everything (Edit) User may manipulate user Roles through the web (Web Roles) User may restore everything (Restore) User may retire everything (Retire) User may use the email interface (Email Access) User may view everything (View) Role "anonymous": User may access the web interface (Web Access) Role "cc-permission": (Restore for "cost_center_permission_group" only) (Retire for "cost_center_permission_group" only) User is allowed to create cost_center_permission_group (Create for "cost_center_permission_group" only) User is allowed to edit cost_center_permission_group (Edit for "cost_center_permission_group" only) Role "contact": User is allowed to create contact (Create for "contact" only) User is allowed to edit contact (Edit for "contact" only) Role "controlling": User is allowed Edit on (Edit for "daily_record": ('status', 'time_record') only) User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'team_lead') only) User is allowed Edit on (Edit for "time_project": ('group_lead', 'team_lead') only) User is allowed Edit on (Edit for "time_wp": ('project',) only) User is allowed View on (View for "user": ('roles',) only) User is allowed View on (View for "user_dynamic": ('id', 'sap_cc', 'user', 'valid_from', 'valid_to') only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to access daily_record (View for "daily_record" only) User is allowed to access daily_record_freeze (View for "daily_record_freeze" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access overtime_correction (View for "overtime_correction" only) User is allowed to access query (View for "query" only) User is allowed to access time_project (View for "time_project" only) User is allowed to access time_record (View for "time_record" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create cost_center (Create for "cost_center" only) User is allowed to create cost_center_group (Create for "cost_center_group" only) User is allowed to create cost_center_status (Create for "cost_center_status" only) User is allowed to create department (Create for "department" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to create product_family (Create for "product_family" only) User is allowed to create public_holiday (Create for "public_holiday" only) User is allowed to create query (Create for "query" only) User is allowed to create reporting_group (Create for "reporting_group" only) User is allowed to create sap_cc (Create for "sap_cc" only) User is allowed to create time_activity (Create for "time_activity" only) User is allowed to create time_activity_perm (Create for "time_activity_perm" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create work_location (Create for "work_location" only) User is allowed to edit cost_center (Edit for "cost_center" only) User is allowed to edit cost_center_group (Edit for "cost_center_group" only) User is allowed to edit 
cost_center_status (Edit for "cost_center_status" only) User is allowed to edit department (Edit for "department" only) User is allowed to edit organisation (Edit for "organisation" only) User is allowed to edit product_family (Edit for "product_family" only) User is allowed to edit public_holiday (Edit for "public_holiday" only) User is allowed to edit query (Edit for "query" only) User is allowed to edit reporting_group (Edit for "reporting_group" only) User is allowed to edit sap_cc (Edit for "sap_cc" only) User is allowed to edit time_activity (Edit for "time_activity" only) User is allowed to edit time_activity_perm (Edit for "time_activity_perm" only) User is allowed to edit time_record (Edit for "time_record" only) User is allowed to edit work_location (Edit for "work_location" only) Role "doc_admin": User is allowed Edit on (Edit for "department": ('doc_num',) only) User is allowed to create artefact (Create for "artefact" only) User is allowed to create doc (Create for "doc" only) User is allowed to create doc_category (Create for "doc_category" only) User is allowed to create doc_status (Create for "doc_status" only) User is allowed to create product_type (Create for "product_type" only) User is allowed to create reference (Create for "reference" only) User is allowed to edit artefact (Edit for "artefact" only) User is allowed to edit doc (Edit for "doc" only) User is allowed to edit doc_category (Edit for "doc_category" only) User is allowed to edit doc_status (Edit for "doc_status" only) User is allowed to edit product_type (Edit for "product_type" only) User is allowed to edit reference (Edit for "reference" only) Role "dom-user-edit-facility": Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['room'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['room'] only) Role "dom-user-edit-gtt": (Search for "user_dynamic" only) May only view/edit records with the correct domain (Edit for "user_dynamic" only) May only view/edit records with the correct domain (View for "user_dynamic" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to create user (Create for "user" only) User is allowed to create user_contact (Create for "user_contact" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 'tt_lines', 'username', 'vie_user'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'csv_delimiter', 'department_temp', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'sync_foreign_key', 'timezone', 
'tt_lines', 'username', 'vie_user'] only) Role "dom-user-edit-hr": (Search for "user_dynamic" only) May only view/edit records with the correct domain (Edit for "user_dynamic" only) May only view/edit records with the correct domain (View for "user_dynamic" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to create user_contact (Create for "user_contact" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view user_dynamic records for ad_domain for which they are in the domain_permission for the user (View for "user_dynamic" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['clearance_by', 'contacts', 'csv_delimiter', 'entry_date', 'firstname', 'hide_message_files', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'reduced_activity_list', 'roles', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'tt_lines', 'vie_user'] only) Role "dom-user-edit-office": User is allowed to create user_contact (Create for "user_contact" only) User is allowed to edit user_contact (Edit for "user_contact" only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (Edit for "user": ['contacts', 'position_text', 'room'] only) Users may view/edit user records for ad_domain for which they are in the domain_permission for the user (View for "user": ['contacts', 'position_text', 'room'] only) Role "external": (Search for "ext_tracker_state": ('id', 'issue') only) (Search for "user": ('id', 'nickname', 'username') only) External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (Edit for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only) External users are allowed to access issue if they are on the list of allowed external users or there is a transitive permission via containers (View for "issue": ['activity', 'actor', 'area', 'category', 'closed', 'composed_of', 'creation', 'creator', 'cur_est_begin', 'cur_est_end', 'deadline', 'depends', 'doc_issue_status', 'earliest_start', 'effective_prio', 'effort_hours', 'external_users', 'files', 'files_affected', 'fixed_in', 'id', 'keywords', 'kind', 'maturity_index', 'messages', 'needs', 'nosy', 'numeric_effort', 'part_of', 'planned_begin', 'planned_end', 'priority', 'release', 'responsible', 
'safety_level', 'severity', 'status', 'superseder', 'test_level', 'title'] only) User is allowed View on (View for "category": ('id', 'name') only) User is allowed View on (View for "user": ('nickname', 'status', 'username') only) User is allowed View on (View for "user_status": ('name',) only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only) User is allowed to access area (View for "area" only) User is allowed to access doc_issue_status (View for "doc_issue_status" only) User is allowed to access ext_tracker (View for "ext_tracker" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to access keyword (View for "keyword" only) User is allowed to access kind (View for "kind" only) User is allowed to access msg_keyword (View for "msg_keyword" only) User is allowed to access safety_level (View for "safety_level" only) User is allowed to access severity (View for "severity" only) User is allowed to access status (View for "status" only) User is allowed to access status_transition (View for "status_transition" only) User is allowed to access test_level (View for "test_level" only) User is allowed to create file (Create for "file" only) User is allowed to create issue (Create for "issue" only) User is allowed to create msg (Create for "msg" only) User is allowed to create query (Create for "query" only) User is allowed to edit their queries (Edit for "query" only) User is allowed to retire their queries (Retire for "query" only) User is allowed to search for their own files (Search for "file" only) User is allowed to search for their own messages (Search for "msg" only) User is allowed to search for their queries (Search for "query" only) User is allowed to search issue (Search for "issue" only) User is allowed to view their own files (View for "file" only) User may access the web interface (Web Access) User may use the email interface (Email Access) Users are allowed to edit some of their details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'password', 'timezone') only) Users are allowed to view some of their details (View for "user": ('activity', 'actor', 'creation', 'creator', 'firstname', 'lastname', 'realname', 'username') only) Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only) Role "facility": (Restore for "room" only) (Retire for "room" only) User is allowed to create room (Create for "room" only) User is allowed to edit room (Edit for "room" only) Role "functional-role": (Restore for "user_functional_role" only) (Retire for "user_functional_role" only) User is allowed Edit on (Edit for "user": ('business_responsible', 'scale_seniority') only) User is allowed View on (View for "user": ('business_responsible', 'planning_role', 'scale_seniority') only) User is allowed to access user_functional_role (View for "user_functional_role" only) User is allowed to create user_functional_role (Create for "user_functional_role" only) User is allowed to edit user_functional_role (Edit for "user_functional_role" only) Role "hr": (Edit for "overtime_period": ('name', 'order') only) (Restore for "room" only) (Retire for "room" only) User is allowed Edit on (Edit for "daily_record": ('required_overtime', 'weekend_allowed') only) User is 
allowed Edit on (Edit for "daily_record": ('status', 'time_record') only) User is allowed Edit on (Edit for "time_project": ('approval_hr', 'approval_required', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'no_overtime', 'no_overtime_day', 'only_hours', 'overtime_reduction') only) User is allowed View on (View for "user": ('contacts',) only) User is allowed to access auto_wp (View for "auto_wp" only) User is allowed to access contract_type (View for "contract_type" only) User is allowed to access daily_record (View for "daily_record" only) User is allowed to access daily_record_freeze (View for "daily_record_freeze" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access overtime_correction (View for "overtime_correction" only) User is allowed to access time_record (View for "time_record" only) User is allowed to access user_contact (View for "user_contact" only) User is allowed to access user_dynamic (View for "user_dynamic" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create auto_wp (Create for "auto_wp" only) User is allowed to create daily_record_freeze (Create for "daily_record_freeze" only) User is allowed to create location (Create for "location" only) User is allowed to create org_location (Create for "org_location" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to create overtime_correction (Create for "overtime_correction" only) User is allowed to create overtime_period (Create for "overtime_period" only) User is allowed to create product_family (Create for "product_family" only) User is allowed to create public_holiday (Create for "public_holiday" only) User is allowed to create reporting_group (Create for "reporting_group" only) User is allowed to create room (Create for "room" only) User is allowed to create sap_cc (Create for "sap_cc" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create uc_type (Create for "uc_type" only) User is allowed to create user (Create for "user" only) User is allowed to create user_dynamic (Create for "user_dynamic" only) User is allowed to edit auto_wp (Edit for "auto_wp" only) User is allowed to edit dynamic user data if not frozen in validity span of dynamic user record (Edit for "user_dynamic" only) User is allowed to edit freeze record if not frozen at the given date (Edit for "daily_record_freeze": ('frozen',) only) User is allowed to edit location (Edit for "location" only) User is allowed to edit org_location (Edit for "org_location" only) User is allowed to edit organisation (Edit for "organisation" only) User is allowed to edit overtime correction if the overtime correction is not frozen (Edit for "overtime_correction" only) User is allowed to edit product_family (Edit for "product_family" only) User is allowed to edit public_holiday (Edit for "public_holiday" only) User is allowed to edit reporting_group (Edit for "reporting_group" only) User is allowed to edit room (Edit for "room" only) User is allowed to edit sap_cc (Edit for "sap_cc" only) User is allowed to edit time_record (Edit for "time_record" only) User is allowed to edit uc_type (Edit for "uc_type" only) User may manipulate user Roles through the web (Web Roles) Role "hr-leave-approval": User is allowed Edit on (Edit for "leave_submission": ('status',) only) User is allowed to access contract_type (View for "contract_type" only) User is allowed 
to access leave_submission (View for "leave_submission" only) User is allowed to access vacation_correction (View for "vacation_correction" only) Role "hr-org-location": (Search for "daily_record_freeze" only) (Search for "overtime_correction" only) (Search for "time_activity_perm" only) (Search for "time_record" only) (Search for "user_dynamic" only) User is allowed to view dynamic user data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "user_dynamic" only) User is allowed to view freeze information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "daily_record_freeze" only) User is allowed to view overtime information if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "overtime_correction" only) User is allowed to view time record data if he/she is in group HR-Org-Location and in the same Org-Location as the given user (View for "time_record" only) Role "hr-vacation": User is allowed to access contract_type (View for "contract_type" only) User is allowed to access leave_submission (View for "leave_submission" only) User is allowed to access vacation_correction (View for "vacation_correction" only) User is allowed to create contract_type (Create for "contract_type" only) User is allowed to create leave_submission (Create for "leave_submission" only) User is allowed to create vacation_correction (Create for "vacation_correction" only) User is allowed to edit contract_type (Edit for "contract_type" only) User is allowed to edit leave_submission (Edit for "leave_submission" only) User is allowed to edit vacation_correction (Edit for "vacation_correction" only) Role "issue_admin": User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only) User is allowed to access issue (View for "issue" only) User is allowed to create area (Create for "area" only) User is allowed to create category (Create for "category" only) User is allowed to create doc_issue_status (Create for "doc_issue_status" only) User is allowed to create ext_tracker (Create for "ext_tracker" only) User is allowed to create issue (Create for "issue" only) User is allowed to create keyword (Create for "keyword" only) User is allowed to create kind (Create for "kind" only) User is allowed to create msg_keyword (Create for "msg_keyword" only) User is allowed to create safety_level (Create for "safety_level" only) User is allowed to create severity (Create for "severity" only) User is allowed to create status (Create for "status" only) User is allowed to create status_transition (Create for "status_transition" only) User is allowed to create test_level (Create for "test_level" only) User is allowed to edit area (Edit for "area" only) User is allowed to edit category (Edit for "category" only) User is allowed to edit doc_issue_status (Edit for "doc_issue_status" only) User is allowed to edit ext_tracker (Edit for "ext_tracker" only) User is allowed to edit issue (Edit for "issue" only) User is allowed to edit keyword (Edit for "keyword" only) User is allowed to edit kind (Edit for "kind" only) User is allowed to edit msg_keyword (Edit for "msg_keyword" only) User is allowed to edit safety_level (Edit for "safety_level" only) User is allowed to edit severity (Edit for "severity" only) User is allowed to edit status (Edit for "status" only) User is allowed to edit status_transition (Edit for "status_transition" only) User is allowed to edit test_level (Edit for 
"test_level" only) Role "it": Create (Create for "user_contact" only) User is allowed Edit on (Edit for "file": ('name', 'type') only) User is allowed Edit on (Edit for "location": ('domain_part',) only) User is allowed Edit on (Edit for "organisation": ('domain_part',) only) User is allowed Edit on (Edit for "user": ('ad_domain', 'nickname', 'password', 'pictures', 'roles', 'timetracking_by', 'timezone', 'username') only) User is allowed Edit on (Edit for "user": ('address', 'alternate_addresses', 'nickname', 'password', 'timezone', 'username') only) User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only) User is allowed Edit on msg if msg is linked from an item with Edit permission (Edit for "msg" only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed to access domain_permission (View for "domain_permission" only) User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) User is allowed to create domain_permission (Create for "domain_permission" only) User is allowed to create it_category (Create for "it_category" only) User is allowed to create it_int_prio (Create for "it_int_prio" only) User is allowed to create it_issue (Create for "it_issue" only) User is allowed to create it_project (Create for "it_project" only) User is allowed to create it_request_type (Create for "it_request_type" only) User is allowed to create mailgroup (Create for "mailgroup" only) User is allowed to edit domain_permission (Edit for "domain_permission" only) User is allowed to edit it_category (Edit for "it_category" only) User is allowed to edit it_int_prio (Edit for "it_int_prio" only) User is allowed to edit it_issue (Edit for "it_issue" only) User is allowed to edit it_project (Edit for "it_project" only) User is allowed to edit it_request_type (Edit for "it_request_type" only) User is allowed to edit mailgroup (Edit for "mailgroup" only) User may manipulate user Roles through the web (Web Roles) Role "itview": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) Role "msgedit": (Search for "msg": ('date', 'id') only) User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only) User is allowed to access ext_msg (View for "ext_msg" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) Role "msgsync": (Search for "msg": ('date', 'id') only) User is allowed Edit on (Edit for "msg": ('author', 'date', 'id', 'keywords', 'subject', 'summary') only) User is allowed to access ext_msg (View for "ext_msg" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to create ext_msg (Create for "ext_msg" only) User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only) User is allowed to edit ext_msg (Edit for "ext_msg" only) User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only) Role "nosy": User may get nosy messages for doc (Nosy for "doc" only) User may get nosy messages for issue (Nosy for "issue" only) 
User may get nosy messages for it_issue (Nosy for "it_issue" only) User may get nosy messages for it_project (Nosy for "it_project" only) User may get nosy messages for support (Nosy for "support" only) Role "office": (Restore for "room" only) (Retire for "room" only) User is allowed View on (View for "user": ('contacts',) only) User is allowed to access user_contact (View for "user_contact" only) User is allowed to create absence (Create for "absence" only) User is allowed to create absence_type (Create for "absence_type" only) User is allowed to create room (Create for "room" only) User is allowed to create uc_type (Create for "uc_type" only) User is allowed to edit absence (Edit for "absence" only) User is allowed to edit absence_type (Edit for "absence_type" only) User is allowed to edit room (Edit for "room" only) User is allowed to edit uc_type (Edit for "uc_type" only) Role "organisation": User is allowed to access location (View for "location" only) User is allowed to access org_location (View for "org_location" only) User is allowed to access organisation (View for "organisation" only) User is allowed to create location (Create for "location" only) User is allowed to create org_location (Create for "org_location" only) User is allowed to create organisation (Create for "organisation" only) User is allowed to edit location (Edit for "location" only) User is allowed to edit org_location (Edit for "org_location" only) User is allowed to edit organisation (Edit for "organisation" only) Role "pgp": Role "procurement": (View for "sap_cc" only) (View for "time_project" only) User is allowed Edit on (Edit for "sap_cc": ('group_lead', 'purchasing_agents', 'team_lead') only) User is allowed Edit on (Edit for "time_project": ('group_lead', 'purchasing_agents', 'team_lead') only) Role "project": User is allowed Edit on (Edit for "time_project": ('cost_center', 'department', 'deputy', 'description', 'name', 'nosy', 'organisation', 'responsible', 'status') only) User is allowed Edit on (Edit for "time_project": ('infosec_req', 'is_extern', 'max_hours', 'op_project', 'planned_effort', 'product_family', 'project_type', 'reporting_group', 'work_location') only) User is allowed to access time_project (View for "time_project" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) User is allowed to create time_project (Create for "time_project" only) User is allowed to create time_project_status (Create for "time_project_status" only) User is allowed to create time_wp (Create for "time_wp" only) User is allowed to create time_wp_group (Create for "time_wp_group" only) User is allowed to edit time_project_status (Edit for "time_project_status" only) User is allowed to edit time_wp (Edit for "time_wp" only) User is allowed to edit time_wp_group (Edit for "time_wp_group" only) Role "project_view": User is allowed to access time_project (View for "time_project" only) User is allowed to access time_report (View for "time_report" only) User is allowed to access time_wp (View for "time_wp" only) Role "sec-incident-nosy": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for "it_project" only) Role "sec-incident-responsible": User is allowed to access it_int_prio (View for "it_int_prio" only) User is allowed to access it_issue (View for "it_issue" only) User is allowed to access it_project (View for 
"it_project" only) Role "staff-report": Role "sub-login": Role "summary_view": Role "supportadmin": User is allowed to access analysis_result (View for "analysis_result" only) User is allowed to access contact (View for "contact" only) User is allowed to access customer (View for "customer" only) User is allowed to access customer_agreement (View for "customer_agreement" only) User is allowed to access mailgroup (View for "mailgroup" only) User is allowed to access return_type (View for "return_type" only) User is allowed to access sup_classification (View for "sup_classification" only) User is allowed to access support (View for "support" only) User is allowed to create analysis_result (Create for "analysis_result" only) User is allowed to create contact (Create for "contact" only) User is allowed to create customer (Create for "customer" only) User is allowed to create customer_agreement (Create for "customer_agreement" only) User is allowed to create mailgroup (Create for "mailgroup" only) User is allowed to create return_type (Create for "return_type" only) User is allowed to create sup_classification (Create for "sup_classification" only) User is allowed to create support (Create for "support" only) User is allowed to edit analysis_result (Edit for "analysis_result" only) User is allowed to edit contact (Edit for "contact" only) User is allowed to edit customer (Edit for "customer" only) User is allowed to edit customer_agreement (Edit for "customer_agreement" only) User is allowed to edit mailgroup (Edit for "mailgroup" only) User is allowed to edit return_type (Edit for "return_type" only) User is allowed to edit sup_classification (Edit for "sup_classification" only) User is allowed to edit support (Edit for "support" only) Role "time-report": User is allowed to access time_report (View for "time_report" only) User is allowed to create time_report (Create for "time_report" only) User is allowed to edit time_report (Edit for "time_report" only) User may edit own file (file created by user) (Edit for "file" only) Role "user": (Search for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only) (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'bookers', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) (View for "time_project": ('activity', 'actor', 'creation', 'creator', 'deputy', 'description', 'id', 'is_extern', 'is_public_holiday', 'is_special_leave', 'is_vacation', 'name', 'nosy', 'only_hours', 'op_project', 'overtime_reduction', 'responsible', 'status', 'work_location', 'wps') only) Search (Search for "user_contact" only) User is allowed Edit on (Edit for "msg": ('keywords',) only) User is allowed Edit on file if file is linked from an item with Edit permission (Edit for "file" only) User is allowed Edit on issue if issue is non-confidential or user is on nosy list (Edit for "issue" only) User is allowed Edit on it_issue if it_issue is non-confidential or user is on nosy list (Edit for "it_issue": ('messages', 'files', 'nosy') only) User is allowed Edit on it_project if it_project is non-confidential or user is on nosy list (Edit for "it_project": 
('messages', 'files', 'nosy') only) User is allowed Edit on support if support is non-confidential or user is on nosy list (Edit for "support": ('analysis_end', 'analysis_result', 'analysis_start', 'bcc', 'business_unit', 'category', 'cc', 'cc_emails', 'classification', 'closed', 'confidential', 'customer', 'emails', 'execution', 'external_ref', 'files', 'goods_received', 'goods_sent', 'lot', 'messages', 'nosy', 'number_effected', 'numeric_effort', 'prio', 'prodcat', 'product', 'related_issues', 'related_support', 'release', 'responsible', 'return_type', 'sap_ref', 'send_to_customer', 'serial_number', 'set_first_reply', 'status', 'superseder', 'title', 'type', 'warranty') only) User is allowed View on (View for "user": ('activity', 'actor', 'ad_domain', 'address', 'alternate_addresses', 'business_responsible', 'clearance_by', 'creation', 'creator', 'firstname', 'id', 'job_description', 'lastname', 'lunch_duration', 'lunch_start', 'nickname', 'pictures', 'position_text', 'queries', 'realname', 'room', 'sex', 'status', 'subst_active', 'substitute', 'supervisor', 'timezone', 'title', 'tt_lines', 'username') only) User is allowed View on (View for "user": ('activity', 'actor', 'address', 'alternate_addresses', 'creation', 'creator', 'id', 'queries', 'realname', 'status', 'timezone', 'username') only) User is allowed View on (View for "user": ('business_responsible', 'department_temp', 'timetracking_by', 'vie_user', 'vie_user_bl_override', 'vie_user_ml') only) User is allowed View on (View for "user": ('contacts',) only) User is allowed View on (View for "user_dynamic": ('department', 'org_location') only) User is allowed View on file if file is linked from an item with View permission (View for "file" only) User is allowed View on issue if issue is non-confidential or user is on nosy list (View for "issue" only) User is allowed View on it_issue if it_issue is non-confidential or user is on nosy list (View for "it_issue" only) User is allowed View on it_project if it_project is non-confidential or user is on nosy list (View for "it_project" only) User is allowed View on msg if msg is linked from an item with View permission (View for "msg" only) User is allowed View on support if support is non-confidential or user is on nosy list (View for "support" only) User is allowed to access absence (View for "absence" only) User is allowed to access absence_type (View for "absence_type" only) User is allowed to access analysis_result (View for "analysis_result" only) User is allowed to access area (View for "area" only) User is allowed to access artefact (View for "artefact" only) User is allowed to access business_unit (View for "business_unit" only) User is allowed to access category (View for "category" only) User is allowed to access contact (View for "contact" only) User is allowed to access contact_type (View for "contact_type" only) User is allowed to access cost_center (View for "cost_center" only) User is allowed to access cost_center_group (View for "cost_center_group" only) User is allowed to access cost_center_permission_group (View for "cost_center_permission_group" only) User is allowed to access cost_center_status (View for "cost_center_status" only) User is allowed to access customer (View for "customer" only) User is allowed to access customer_agreement (View for "customer_agreement" only) User is allowed to access daily record if he is owner or supervisor or timetracking-by user (Edit for "daily_record": ('status', 'time_record') only) User is allowed to access daily record if he is 
owner or supervisor or timetracking-by user (View for "daily_record" only) User is allowed to access daily_record_status (View for "daily_record_status" only) User is allowed to access department (View for "department" only) User is allowed to access doc (View for "doc" only) User is allowed to access doc_category (View for "doc_category" only) User is allowed to access doc_issue_status (View for "doc_issue_status" only) User is allowed to access doc_status (View for "doc_status" only) User is allowed to access ext_tracker (View for "ext_tracker" only) User is allowed to access ext_tracker_state (View for "ext_tracker_state" only) User is allowed to access ext_tracker_type (View for "ext_tracker_type" only) User is allowed to access functional_role (View for "functional_role" only) User is allowed to access it_category (View for "it_category" only) User is allowed to access it_issue_status (View for "it_issue_status" only) User is allowed to access it_prio (View for "it_prio" only) User is allowed to access it_project_status (View for "it_project_status" only) User is allowed to access it_request_type (View for "it_request_type" only) User is allowed to access keyword (View for "keyword" only) User is allowed to access kind (View for "kind" only) User is allowed to access leave_status (View for "leave_status" only) User is allowed to access location (View for "location" only) User is allowed to access mailgroup (View for "mailgroup" only) User is allowed to access msg_keyword (View for "msg_keyword" only) User is allowed to access org_group (View for "org_group" only) User is allowed to access org_location (View for "org_location" only) User is allowed to access organisation (View for "organisation" only) User is allowed to access overtime_period (View for "overtime_period" only) User is allowed to access prodcat (View for "prodcat" only) User is allowed to access product (View for "product" only) User is allowed to access product_family (View for "product_family" only) User is allowed to access product_type (View for "product_type" only) User is allowed to access project_type (View for "project_type" only) User is allowed to access public_holiday (View for "public_holiday" only) User is allowed to access reference (View for "reference" only) User is allowed to access reporting_group (View for "reporting_group" only) User is allowed to access return_type (View for "return_type" only) User is allowed to access room (View for "room" only) User is allowed to access safety_level (View for "safety_level" only) User is allowed to access sap_cc (View for "sap_cc" only) User is allowed to access severity (View for "severity" only) User is allowed to access sex (View for "sex" only) User is allowed to access status (View for "status" only) User is allowed to access status_transition (View for "status_transition" only) User is allowed to access summary_report (View for "summary_report" only) User is allowed to access summary_type (View for "summary_type" only) User is allowed to access sup_classification (View for "sup_classification" only) User is allowed to access sup_execution (View for "sup_execution" only) User is allowed to access sup_prio (View for "sup_prio" only) User is allowed to access sup_status (View for "sup_status" only) User is allowed to access sup_type (View for "sup_type" only) User is allowed to access sup_warranty (View for "sup_warranty" only) User is allowed to access test_level (View for "test_level" only) User is allowed to access time_activity (View for "time_activity" 
only) User is allowed to access time_activity_perm (View for "time_activity_perm" only) User is allowed to access time_project_status (View for "time_project_status" only) User is allowed to access time_wp_group (View for "time_wp_group" only) User is allowed to access time_wp_summary_no (View for "time_wp_summary_no" only) User is allowed to access timesheet (View for "timesheet" only) User is allowed to access uc_type (View for "uc_type" only) User is allowed to access user_status (View for "user_status" only) User is allowed to access vac_aliq (View for "vac_aliq" only) User is allowed to access vacation_report (View for "vacation_report" only) User is allowed to access work_location (View for "work_location" only) User is allowed to create daily_record (Create for "daily_record" only) User is allowed to create doc (Create for "doc" only) User is allowed to create ext_tracker_state (Create for "ext_tracker_state" only) User is allowed to create file (Create for "file" only) User is allowed to create issue (Create for "issue" only) User is allowed to create it_issue (Create for "it_issue" only) User is allowed to create leave_submission (Create for "leave_submission" only) User is allowed to create msg (Create for "msg" only) User is allowed to create queries (Create for "query" only) User is allowed to create support (Create for "support" only) User is allowed to create time_record (Create for "time_record" only) User is allowed to create time_wp (Create for "time_wp" only) User is allowed to edit (some of) their own user details (Edit for "user": ('csv_delimiter', 'hide_message_files', 'lunch_duration', 'lunch_start', 'password', 'queries', 'realname', 'room', 'subst_active', 'substitute', 'timezone', 'tt_lines') only) User is allowed to edit category if he is responsible for it (Edit for "category": ('nosy', 'default_part_of') only) User is allowed to edit doc (Edit for "doc" only) User is allowed to edit ext_tracker_state (Edit for "ext_tracker_state" only) User is allowed to edit if he's the owner of the contact (Edit for "user_contact": ('visible',) only) User is allowed to edit several fields if he is Responsible for an it_issue (Edit for "it_issue": ('responsible',) only) User is allowed to edit several fields if he is Stakeholder/Responsible for an it_issue (Edit for "it_issue": ('deadline', 'status', 'title') only) User is allowed to edit their queries (Edit for "query" only) User is allowed to edit time category if the status is "Open" and he is responsible for the time category (Edit for "time_project": ('deputy', 'planned_effort', 'nosy') only) User is allowed to edit workpackage if he is time category owner or deputy (Edit for "time_wp": ('cost_center', 'is_public', 'name', 'responsible', 'time_wp_summary_no', 'wp_no') only) User is allowed to retire their queries (Retire for "query" only) User is allowed to search daily_record (Search for "daily_record" only) User is allowed to search for their own files (Search for "file" only) User is allowed to search for their own messages (Search for "msg" only) User is allowed to search for their queries (Search for "query" only) User is allowed to search issue (Search for "issue" only) User is allowed to search it_issue (Search for "it_issue" only) User is allowed to search it_project (Search for "it_project" only) User is allowed to search leave_submission (Search for "leave_submission" only) User is allowed to search support (Search for "support" only) User is allowed to search time_record (Search for "time_record" only) User is 
allowed to search time_wp (Search for "time_wp": ('activity', 'actor', 'auto_wp', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'is_extern', 'is_public', 'id', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) User is allowed to search user_status (Search for "user": ('status',) only) User is allowed to see time record if he is allowed to see all details on work package or User may view a daily_record (and time_records that are attached to that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "time_record" only) User is allowed to view (some of) their own user details (View for "user": ('entry_date', 'planning_role') only) User is allowed to view contact if he's the owner of the contact or the contact is marked visible (View for "user_contact" only) User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (Edit for "leave_submission": ('status',) only) User is allowed to view leave submission if he is the supervisor or the person to whom approvals are delegated (View for "leave_submission" only) User is allowed to view selected fields in work package if booking is allowed for this user (also applies to timetracking by, supervisor and approval delegated) (View for "time_wp": ('activity', 'actor', 'cost_center', 'creation', 'creator', 'description', 'durations_allowed', 'epic_key', 'has_expiration_date', 'id', 'is_extern', 'is_public', 'name', 'project', 'responsible', 'time_end', 'time_start', 'time_wp_summary_no', 'travel', 'wp_no') only) User is allowed to view their own files (View for "file" only) User is allowed to view their own messages (View for "msg" only) User is allowed to view their own overtime information (View for "overtime_correction" only) User is allowed to view time record if he is the supervisor or the person to whom approvals are delegated (View for "time_record" only) User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_project": ('name',) only) User is allowed to view work package and time category names if he/she has role HR or HR-Org-Location (View for "time_wp": ('name', 'project') only) User is allowed to view/edit workpackage if he is owner or project responsible/deputy (Edit for "time_wp": ('bookers', 'description', 'epic_key', 'planned_effort', 'time_end', 'time_start', 'time_wp_summary_no') only) User may access the rest interface (Rest Access) User may access the web interface (Web Access) User may access the xmlrpc interface (Xmlrpc Access) User may edit own leave submissions (Edit for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only) User may edit own leave submissions (View for "leave_submission": ('comment', 'comment_cancel', 'first_day', 'last_day', 'status', 'time_wp', 'user') only) User may see time report if reponsible or deputy of time project or on nosy list of time project (View for "time_report" only) User may use the email interface (Email Access) User may view a daily_record (and time_records that are attached to 
that daily_record) if the user owns the daily_record or has role 'HR' or 'Controlling', or the user is supervisor or substitute supervisor of the owner of the daily record (the supervisor relationship is transitive) or the user is the department manager of the owner of the daily record. If user has role HR-Org-Location and is in the same Org-Location as the record, it may also be seen (View for "daily_record" only) User may view their own user functional role (View for "user_functional_role" only) User may view time category if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_project" only) User may view work package if responsible for it, if user is owner or deputy of time category or on nosy list of time category or if user is department manager of time category (View for "time_wp" only) User or Timetracking by user may edit time_records owned by user (Edit for "time_record" only) User or Timetracking by user may edit time_records owned by user (Restore for "time_record" only) User or Timetracking by user may edit time_records owned by user (Retire for "time_record" only) User or Timetracking by user may edit time_records owned by user (View for "time_record" only) Users are allowed to view their own and public queries for classes where they have search permission (View for "query" only) Users may see daily record if they may see one of the time_records for that day (View for "daily_record" only) Role "user_view": User is allowed to access user (View for "user" only) Role "vacation-report": """.strip ()
83.063574
690
0.762034
be004417db97934b47985fcf6b9c727896247c48
220
py
Python
CodeChef/problems/IMDB/main.py
object-oriented-human/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
1
2022-02-21T15:43:01.000Z
2022-02-21T15:43:01.000Z
CodeChef/problems/IMDB/main.py
foooop/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
null
null
null
CodeChef/problems/IMDB/main.py
foooop/competitive
9e761020e887d8980a39a64eeaeaa39af0ecd777
[ "MIT" ]
null
null
null
tc = int(input())
while tc:
    tc -= 1
    best = 0
    n, x = map(int, input().split())
    for i in range(n):
        s, r = map(int, input().split())
        if x >= s:
            best = max(best, r)
    print(best)
22
40
0.445455
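The record above keeps, over pairs (s, r) read from stdin, the largest r whose s does not exceed x. A minimal restatement of that selection as a pure function, easier to test in isolation; the function name and the sample data are invented for illustration:

def best_rating(pairs, x):
    # pairs: iterable of (s, r); return the best r with s <= x, else 0.
    return max((r for s, r in pairs if s <= x), default=0)

assert best_rating([(100, 1), (50, 2), (60, 3)], 60) == 3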
be0099fd02ee40c6a15038fa8158d18b025dd23d
3,218
py
Python
tests/test_sqlite_wrapper.py
Privex/python-db
3b46b34b4310973e2e2a30a66adaa853fd10340d
[ "X11" ]
1
2019-12-19T13:12:53.000Z
2019-12-19T13:12:53.000Z
tests/test_sqlite_wrapper.py
Privex/python-db
3b46b34b4310973e2e2a30a66adaa853fd10340d
[ "X11" ]
9
2020-02-24T20:14:53.000Z
2021-04-30T21:51:04.000Z
tests/test_sqlite_wrapper.py
Privex/python-db
3b46b34b4310973e2e2a30a66adaa853fd10340d
[ "X11" ]
null
null
null
""" Tests related to :class:`.SqliteWrapper` / :class:`.ExampleWrapper` """ # from unittest import TestCase from tests.base import *
31.242718
97
0.579863
be00d24937df6595d3c59f1ae767515161b8f7ef
5,320
py
Python
var/spack/repos/builtin/packages/strumpack/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
9
2018-04-18T07:51:40.000Z
2021-09-10T03:56:57.000Z
var/spack/repos/builtin/packages/strumpack/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
907
2018-04-18T11:17:57.000Z
2022-03-31T13:20:25.000Z
var/spack/repos/builtin/packages/strumpack/package.py
robertodr/spack
9b809e01b47d48f01b3d257912fe1b752943cd3d
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
29
2018-11-05T16:14:23.000Z
2022-02-03T16:07:09.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *
42.56
95
0.638346
be011eb0f4bc43a928140f63592325792f0414b5
6,318
py
Python
actionserver/actions/action_feedbackform.py
Ajju2211/frendy-bot
b86a7a3cb3fb54b300ad9b870defb947f22dc146
[ "Apache-2.0" ]
null
null
null
actionserver/actions/action_feedbackform.py
Ajju2211/frendy-bot
b86a7a3cb3fb54b300ad9b870defb947f22dc146
[ "Apache-2.0" ]
null
null
null
actionserver/actions/action_feedbackform.py
Ajju2211/frendy-bot
b86a7a3cb3fb54b300ad9b870defb947f22dc146
[ "Apache-2.0" ]
null
null
null
from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction
# from rasa_core.events import (UserUtteranceReverted, UserUttered,
#                               ActionExecuted, Event)
from rasa_sdk.events import AllSlotsReset, SlotSet
from rasa.core.constants import REQUESTED_SLOT
from rasa.core.slots import Slot
import pandas as pd
import json
from actionserver.utils import utilities as util
from actionserver.controllers.faqs.faq import FAQ
from actionserver.controllers.constants.orderForm import *
import logging
from actionserver.utils.utilities import INVALID_VALUE

product_list = []
quant_list = []  # takes quantity from user

logger = logging.getLogger(__name__)

with open(r'./actionserver/custom_payload.json') as f:
    frendy_product_menu = json.load(f)

# Code snippet for global back
# return [Restarted(), UserUttered(text="/get_started", parse_data={
#     "intent": {"confidence": 1.0, "name": "get_started"},
#     "entities": []
# }), FollowupAction(name="utter_greet")]
37.832335
181
0.597341
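The record above shows only the module-level setup of the action server (imports, menu payload, logger). For context, a minimal rasa_sdk custom action sketch built on the same imports; the action name and the message text are assumptions for illustration, not part of the original file:

from typing import Any, Dict, List, Text

from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher


class ActionShowMenu(Action):
    # Hypothetical action, for illustration only.
    def name(self) -> Text:
        return "action_show_menu"

    def run(self, dispatcher: CollectingDispatcher, tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # Send a message back to the user; a real action could send the
        # loaded menu payload instead.
        dispatcher.utter_message(text="Here is our menu.")
        return []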
be01c82117aa2911b241e39136b462d24502c315
793
py
Python
dash/graphs.py
fuzzylabs/wearable-my-foot
5e7d818fc51a3d3babbe1c0ec49450b1a1f030c6
[ "Apache-2.0" ]
5
2020-09-04T13:49:41.000Z
2021-07-30T02:33:49.000Z
dash/graphs.py
archena/wearable-my-foot
5e7d818fc51a3d3babbe1c0ec49450b1a1f030c6
[ "Apache-2.0" ]
2
2020-09-24T07:55:43.000Z
2020-09-24T09:30:19.000Z
dash/graphs.py
archena/wearable-my-foot
5e7d818fc51a3d3babbe1c0ec49450b1a1f030c6
[ "Apache-2.0" ]
1
2021-03-04T03:18:37.000Z
2021-03-04T03:18:37.000Z
import plotly.graph_objs as go
29.37037
59
0.493064
be01e27689f95fbc7033b6a5da2ab015674dada0
2,909
py
Python
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2021-09-07T18:36:04.000Z
2021-09-07T18:36:04.000Z
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
2
2019-10-02T23:37:38.000Z
2020-10-02T01:17:31.000Z
azure-mgmt-web/azure/mgmt/web/models/app_service_certificate_resource.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2019-06-17T22:18:23.000Z
2019-06-17T22:18:23.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource
38.786667
102
0.625645
be0243ad78899348119ce102fbea0418e12871e2
5,379
py
Python
telethon/tl/functions/stickers.py
polisitni1/DogeClickBot
ac57eaeefca2c6ab9e48458f9f928a6a421a162e
[ "MIT" ]
null
null
null
telethon/tl/functions/stickers.py
polisitni1/DogeClickBot
ac57eaeefca2c6ab9e48458f9f928a6a421a162e
[ "MIT" ]
null
null
null
telethon/tl/functions/stickers.py
polisitni1/DogeClickBot
ac57eaeefca2c6ab9e48458f9f928a6a421a162e
[ "MIT" ]
null
null
null
"""File generated by TLObjects' generator. All changes will be ERASED""" from ...tl.tlobject import TLRequest from typing import Optional, List, Union, TYPE_CHECKING import os import struct if TYPE_CHECKING: from ...tl.types import TypeInputStickerSet, TypeInputUser, TypeInputStickerSetItem, TypeInputDocument
31.641176
117
0.622421
be035d1ced1e70706ec7a59e81ecf6539a9f044b
3,960
py
Python
applications/ChimeraApplication/tests/chimera_analysis_base_test.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
778
2017-01-27T16:29:17.000Z
2022-03-30T03:01:51.000Z
applications/ChimeraApplication/tests/chimera_analysis_base_test.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
6,634
2017-01-15T22:56:13.000Z
2022-03-31T15:03:36.000Z
applications/ChimeraApplication/tests/chimera_analysis_base_test.py
lkusch/Kratos
e8072d8e24ab6f312765185b19d439f01ab7b27b
[ "BSD-4-Clause" ]
224
2017-02-07T14:12:49.000Z
2022-03-06T23:09:34.000Z
import KratosMultiphysics
import KratosMultiphysics.KratosUnittest as UnitTest
import KratosMultiphysics.ChimeraApplication
from KratosMultiphysics.ChimeraApplication.fluid_chimera_analysis import FluidChimeraAnalysis
58.235294
114
0.474495
be045e37a15278ad4b76fd0b0f607b024e9f6bee
925
py
Python
parsers/rss10.py
side-beach-city/SBCLinkCopyTool
12ec16eefddac215e6a2be92464fde75677c8548
[ "Apache-2.0" ]
null
null
null
parsers/rss10.py
side-beach-city/SBCLinkCopyTool
12ec16eefddac215e6a2be92464fde75677c8548
[ "Apache-2.0" ]
2
2021-06-28T01:52:31.000Z
2021-06-28T02:21:18.000Z
parsers/rss10.py
side-beach-city/SBCLinkCopyTool
12ec16eefddac215e6a2be92464fde75677c8548
[ "Apache-2.0" ]
null
null
null
import urllib.request
import xml.etree.ElementTree

if __name__ == "__main__":
    import pprint
    pprint.pprint(RSS10Parser("https://www.youtube.com/feeds/videos.xml?playlist_id=PLrPVslFukDQo7l5RCqAZtKDl6tUyMAFWH").getlist())
37
129
0.655135
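The snippet above calls RSS10Parser, whose definition is not included in this record's excerpt, so it stays as-is. A hypothetical stdlib-only sketch of fetching entry titles from the same kind of Atom feed (the helper name is mine, not the missing class):

import urllib.request
import xml.etree.ElementTree as ET

def fetch_titles(url):
    # Fetch the feed and return the <entry><title> texts (Atom namespace).
    with urllib.request.urlopen(url) as response:
        root = ET.fromstring(response.read())
    ns = {'atom': 'http://www.w3.org/2005/Atom'}
    return [entry.findtext('atom:title', namespaces=ns)
            for entry in root.findall('atom:entry', ns)]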
be04a0613039c84ca76bcc0ca57e9da1601cdaf5
403
py
Python
examples/laser.py
MPI-IS/reactive_pepper
079f9b0627bfd6c9e3f2a4466c95ad662002a600
[ "BSD-3-Clause" ]
null
null
null
examples/laser.py
MPI-IS/reactive_pepper
079f9b0627bfd6c9e3f2a4466c95ad662002a600
[ "BSD-3-Clause" ]
null
null
null
examples/laser.py
MPI-IS/reactive_pepper
079f9b0627bfd6c9e3f2a4466c95ad662002a600
[ "BSD-3-Clause" ]
null
null
null
import math,time,random
import pepper_interface

IP = "192.168.0.147"
PORT = 9559
simulation = False

with pepper_interface.get(IP,PORT,simulation) as pepper:
    time.sleep(1.0)
    values,time_stamp = pepper.laser.get()
    print
    print "Front"
    print values["Front"]
    print
    print "Left"
    print values["Left"]
    print
    print "Right"
    print values["Right"]
    print
14.392857
56
0.647643
be04c82cd5f62929d01752841a8ec17a1254d468
291
py
Python
exercises/pt/exc_01_03_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
2,085
2019-04-17T13:10:40.000Z
2022-03-30T21:51:46.000Z
exercises/pt/exc_01_03_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
79
2019-04-18T14:42:55.000Z
2022-03-07T08:15:43.000Z
exercises/pt/exc_01_03_01.py
Jette16/spacy-course
32df0c8f6192de6c9daba89740a28c0537e4d6a0
[ "MIT" ]
361
2019-04-17T13:34:32.000Z
2022-03-28T04:42:45.000Z
# Import the English language class and create an nlp object
from ____ import ____

nlp = ____

# Process the text
doc = ____("I like tree kangaroos and narwhals.")

# Select the first token
first_token = doc[____]

# Print the first token's text
print(first_token.____)
22.384615
69
0.75945
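For reference, one plausible completion of the blanked exercise above (the blanks are intentional in the course material; this follows the standard spaCy course API):

from spacy.lang.en import English

nlp = English()
doc = nlp("I like tree kangaroos and narwhals.")
first_token = doc[0]
print(first_token.text)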
be04f5e587c1b673bb12feefbad95d55e8558e6e
3,946
py
Python
tests/integration/mci/test_happy_path.py
qateam123/eq
704757952323647d659c49a71975c56406ff4047
[ "MIT" ]
null
null
null
tests/integration/mci/test_happy_path.py
qateam123/eq
704757952323647d659c49a71975c56406ff4047
[ "MIT" ]
8
2020-03-24T15:24:18.000Z
2022-03-02T04:32:56.000Z
tests/integration/mci/test_happy_path.py
qateam123/eq
704757952323647d659c49a71975c56406ff4047
[ "MIT" ]
null
null
null
from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
40.265306
141
0.639635
be0508937eb9d9d5130de65137f4cd2a7335c162
70,784
py
Python
src/transformers/models/hubert/modeling_tf_hubert.py
OllieBroadhurst/transformers
12428f0ef15bb3631e7a5f04672ddb05f363de97
[ "Apache-2.0" ]
1
2022-03-25T01:33:40.000Z
2022-03-25T01:33:40.000Z
src/transformers/models/hubert/modeling_tf_hubert.py
OllieBroadhurst/transformers
12428f0ef15bb3631e7a5f04672ddb05f363de97
[ "Apache-2.0" ]
1
2022-03-23T19:49:13.000Z
2022-03-23T19:49:13.000Z
src/transformers/models/hubert/modeling_tf_hubert.py
erichan1/transformers
12428f0ef15bb3631e7a5f04672ddb05f363de97
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2021 The Fairseq Authors and the HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TensorFlow Hubert model.""" import inspect import warnings from typing import Any, Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput from ...modeling_tf_utils import TFPreTrainedModel, booleans_processing, get_initializer, keras_serializable from ...tf_utils import shape_list from ...tokenization_utils_base import BatchEncoding from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_hubert import HubertConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "HubertConfig" TF_HUBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/hubert-base-ls960", # See all Hubert models at https://huggingface.co/models?filter=hubert ] LARGE_NEGATIVE = -1e8 # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.input_values_processing def input_values_processing(func, config, input_values, **kwargs): """ Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input has to be named accordingly to the parameters name, i.e. `input_values = tf.keras.Input(shape=(128,), dtype='float32', name="input_values")` otherwise the order of the tensors will not be guaranteed during the training. Args: func (`callable`): The callable function of the TensorFlow model. config ([`PretrainedConfig`]): The config of the running model. **kwargs: The inputs of the model. Returns: Two lists, one for the missing layers, and another one for the unexpected layers. """ signature = dict(inspect.signature(func).parameters) signature.pop("kwargs", None) signature.pop("self", None) parameter_names = list(signature.keys()) output = {} allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray) for k, v in kwargs.items(): if isinstance(v, allowed_types) or v is None: output[k] = v else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") if isinstance(input_values, (tuple, list)): for i, input in enumerate(input_values): # EagerTensors don't allow to use the .name property so we check for a real Tensor if type(input) == tf.Tensor: # Tensor names have always the pattern `name:id` then we check only the # `name` part tensor_name = input.name.split(":")[0] if tensor_name in parameter_names: output[tensor_name] = input else: output[parameter_names[i]] = input elif isinstance(input, allowed_types) or input is None: output[parameter_names[i]] = input else: raise ValueError( f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}." 
) elif isinstance(input_values, (dict, BatchEncoding)): if "inputs" in input_values: warnings.warn( "The `inputs` argument is deprecated and will be removed in a future version, use `input_values` instead.", FutureWarning, ) output["input_values"] = input_values.pop("inputs") if "decoder_cached_states" in input_values: warnings.warn( "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.", FutureWarning, ) output["past_key_values"] = input_values.pop("decoder_cached_states") for k, v in dict(input_values).items(): if isinstance(v, allowed_types) or v is None: output[k] = v elif k not in parameter_names and "args" not in parameter_names: logger.warning( f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored." ) continue else: raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.") else: if isinstance(input_values, tf.Tensor) or input_values is None: output[parameter_names[0]] = input_values else: raise ValueError( f"Data of type {type(input_values)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}." ) for name in parameter_names: if name not in list(output.keys()) and name != "args": output[name] = kwargs.pop(name, signature[name].default) # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs) # So to respect the proper output we have to add this exception if "args" in output: if output["args"] is not None and type(output["args"]) == tf.Tensor: tensor_name = output["args"].name.split(":")[0] output[tensor_name] = output["args"] else: # `args` in this case is always the first parameter, then `input_values` output["input_values"] = output["args"] del output["args"] if "kwargs" in output: del output["kwargs"] boolean_dict = { k: v for k, v in output.items() if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"] } output.update(booleans_processing(config=config, **boolean_dict)) return output # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._sample_without_replacement def _sample_without_replacement(distribution, num_samples): """ Categorical sampling without replacement is currently not implemented. 
The gumbel-max trick will do for now - see https://github.com/tensorflow/tensorflow/issues/9260 for more info """ z = -tf.math.log(tf.random.uniform(shape_list(distribution), 0, 1)) _, indices = tf.nn.top_k(distribution + z, num_samples) return indices # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._scatter_values_on_batch_indices def _scatter_values_on_batch_indices(values, batch_indices, output_shape): """ Scatter function as in PyTorch with indices in format (batch_dim, indixes) """ indices_shape = shape_list(batch_indices) # broadcast batch dim to indices_shape broad_casted_batch_dims = tf.reshape( tf.broadcast_to(tf.expand_dims(tf.range(indices_shape[0]), axis=-1), indices_shape), [1, -1] ) # transform batch_indices to pair_indices pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) # scatter values to pair indices return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), output_shape) # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, min_masks: int = 0, ) -> tf.Tensor: """ Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps attention_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_length: size of the mask min_masks: minimum number of masked spans Adapted from [fairseq's data_utils.py](https://github.com/pytorch/fairseq/blob/e0788f7007a8473a76db573985031f3c94201e79/fairseq/data/data_utils.py#L376). 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length} and `sequence_length`: {sequence_length}`" ) # compute number of masked spans in batch num_masked_spans = int(mask_prob * sequence_length / mask_length + tf.random.uniform((1,))) num_masked_spans = max(num_masked_spans, min_masks) # make sure num masked indices <= sequence_length if num_masked_spans * mask_length > sequence_length: num_masked_spans = sequence_length // mask_length # SpecAugment mask to fill spec_aug_mask = tf.zeros((batch_size, sequence_length), dtype=tf.int32) # uniform distribution to sample from, make sure that offset samples are < sequence_length uniform_dist = tf.ones((batch_size, sequence_length - (mask_length - 1))) # get random indices to mask spec_aug_mask_idxs = _sample_without_replacement(uniform_dist, num_masked_spans) # expand masked indices to masked spans spec_aug_mask_idxs = tf.expand_dims(spec_aug_mask_idxs, -1) spec_aug_mask_idxs = tf.tile(spec_aug_mask_idxs, (1, 1, mask_length)) spec_aug_mask_idxs = tf.reshape(spec_aug_mask_idxs, (batch_size, num_masked_spans * mask_length)) offsets = tf.range(mask_length)[tf.newaxis, tf.newaxis, :] offsets = tf.tile(offsets, (batch_size, num_masked_spans, 1)) offsets = tf.reshape(offsets, (batch_size, num_masked_spans * mask_length)) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # scatter indices to mask spec_aug_mask = _scatter_values_on_batch_indices( tf.ones_like(spec_aug_mask_idxs), spec_aug_mask_idxs, spec_aug_mask.shape ) return spec_aug_mask # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNorm with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2WeightNormConv1D with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2NoLayerNormConvLayer with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2LayerNormConvLayer with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2GroupNormConvLayer with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2PositionalConvEmbedding with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2SamePadLayer with Wav2Vec2->Hubert # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with TFBart->TFHubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2FeedForward with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayer with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderLayerStableLayerNorm with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2Encoder with Wav2Vec2->Hubert # Copied from transformers.models.wav2vec2.modeling_tf_wav2vec2.TFWav2Vec2EncoderStableLayerNorm with Wav2Vec2->Hubert HUBERT_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TF 2.0 models accepts two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional arguments. This second option is useful when using [`tf.keras.Model.fit`] method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument : - a single Tensor with `input_values` only and nothing else: `model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_values, attention_mask])` or `model([input_values, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_values": input_values, "token_type_ids": token_type_ids})` </Tip> Args: config ([`HubertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. 
""" HUBERT_INPUTS_DOCSTRING = r""" Args: input_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`BertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_values` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_values` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False``): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """
42.461908
164
0.656575
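A minimal NumPy sketch of the SpecAugment span-masking scheme that `_compute_mask_indices` in the record above implements in TensorFlow; the function and variable names here are illustrative and not part of the record.

import numpy as np

def span_mask(batch_size, sequence_length, mask_prob=0.05, mask_length=10, min_masks=0, seed=None):
    """Boolean (batch, seq) mask with roughly mask_prob of positions covered by spans."""
    rng = np.random.default_rng(seed)
    num_spans = max(int(mask_prob * sequence_length / mask_length + rng.random()), min_masks)
    num_spans = min(num_spans, sequence_length // mask_length)  # spans must fit in the sequence
    mask = np.zeros((batch_size, sequence_length), dtype=bool)
    for b in range(batch_size):
        # distinct span starts; like the TF helper, the spans themselves may still overlap
        starts = rng.choice(sequence_length - mask_length + 1, size=num_spans, replace=False)
        for s in starts:
            mask[b, s:s + mask_length] = True
    return mask

print(span_mask(2, 100, seed=0).sum(axis=-1))  # at most num_spans * mask_length positions per row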
be05301485051b024d0504eecb5189daad437a58
3,242
py
Python
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
marioluan/mit-opencourseware-cs
5de013f8e321fed2ff3b7a13e8929a44805db78b
[ "MIT" ]
null
null
null
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
marioluan/mit-opencourseware-cs
5de013f8e321fed2ff3b7a13e8929a44805db78b
[ "MIT" ]
null
null
null
600/unit-1/recursion/problem-set/mit-solutions/ps2_hangman_sol1.py
marioluan/mit-opencourseware-cs
5de013f8e321fed2ff3b7a13e8929a44805db78b
[ "MIT" ]
1
2020-05-19T13:29:18.000Z
2020-05-19T13:29:18.000Z
# 6.00 Problem Set 2 # # Hangman # Name : Solutions # Collaborators : <your collaborators> # Time spent : <total time> # ----------------------------------- # Helper code # You don't need to understand this helper code, # but you will have to know how to use the functions import random import string WORDLIST_FILENAME = "words.txt" def load_words(): """ Returns a list of valid words. Words are strings of lowercase letters. Depending on the size of the word list, this function may take a while to finish. """ print "Loading word list from file..." # inFile: file inFile = open(WORDLIST_FILENAME, 'r', 0) # line: string line = inFile.readline() # wordlist: list of strings wordlist = string.split(line) print " ", len(wordlist), "words loaded." return wordlist def choose_word(wordlist): """ wordlist (list): list of words (strings) Returns a word from wordlist at random """ return random.choice(wordlist) # end of helper code # ----------------------------------- # load the list of words into the wordlist variable # so that it can be accessed from anywhere in the program wordlist = load_words() def partial_word(secret_word, guessed_letters): """ Return the secret_word in user-visible format, with underscores used to replace characters that have not yet been guessed. """ result = '' for letter in secret_word: if letter in guessed_letters: result = result + letter else: result = result + '_' return result def hangman(): """ Runs the hangman game. """ print 'Welcome to the game, Hangman!' secret_word = choose_word(wordlist) print 'I am thinking of a word that is ' + str(len(secret_word)) + ' letters long.' num_guesses = 8 word_guessed = False guessed_letters = '' available_letters = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z'] # Letter-guessing loop. Ask the user to guess a letter and respond to the # user based on whether the word has yet been correctly guessed. while num_guesses > 0 and not word_guessed: print '-------------' print 'You have ' + str(num_guesses) + ' guesses left.' print 'Available letters: ' + ''.join(available_letters) guess = raw_input('Please guess a letter:') if guess not in available_letters: print 'Oops! You\'ve already guessed that letter: ' + partial_word(secret_word, guessed_letters) elif guess not in secret_word: num_guesses -= 1 available_letters.remove(guess) print 'Oops! That letter is not in my word: ' + partial_word(secret_word, guessed_letters) else: available_letters.remove(guess) guessed_letters += guess print 'Good guess: ' + partial_word(secret_word, guessed_letters) if secret_word == partial_word(secret_word, guessed_letters): word_guessed = True if word_guessed: print 'Congratulations, you won!' else: print 'Game over.'
32.42
108
0.604874
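The file above is Python 2 (print statements, raw_input, string.split, and the unbuffered open(..., 'r', 0)). A hedged Python 3 rendering of its two helpers, for comparison only; the record itself is left as the course published it.

import random

WORDLIST_FILENAME = "words.txt"

def load_words():
    """Return the list of lowercase words from the word file."""
    print("Loading word list from file...")
    with open(WORDLIST_FILENAME) as in_file:
        wordlist = in_file.readline().split()  # str.split replaces string.split(line)
    print(" ", len(wordlist), "words loaded.")
    return wordlist

def partial_word(secret_word, guessed_letters):
    """Show guessed letters in place and underscores elsewhere."""
    return "".join(c if c in guessed_letters else "_" for c in secret_word)

In the game loop itself, each raw_input(...) becomes input(...) and each print statement becomes a print(...) call.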
be05ff012f40e6f5a4b594110683f58699e3309e
412
py
Python
top/api/rest/FenxiaoRefundMessageAddRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
top/api/rest/FenxiaoRefundMessageAddRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
top/api/rest/FenxiaoRefundMessageAddRequest.py
forestsheep/middleman
34d54f9ffd9d7bcd775a8dcce4f00dd6c5bb1acd
[ "MIT" ]
null
null
null
''' Created by auto_sdk on 2016.04.13 ''' from top.api.base import RestApi
24.235294
55
0.75
be071e34802c8618edb66a1241ddd2e7d443b843
3,316
py
Python
image-generation/slegan/args.py
AaratiAkkapeddi/nnabla-examples
db9e5ad850303c158773aeb275e5c3821b4a3935
[ "Apache-2.0" ]
228
2017-11-20T06:05:56.000Z
2022-03-23T12:40:05.000Z
image-generation/slegan/args.py
AaratiAkkapeddi/nnabla-examples
db9e5ad850303c158773aeb275e5c3821b4a3935
[ "Apache-2.0" ]
36
2018-01-11T23:26:20.000Z
2022-03-12T00:53:38.000Z
image-generation/slegan/args.py
AaratiAkkapeddi/nnabla-examples
db9e5ad850303c158773aeb275e5c3821b4a3935
[ "Apache-2.0" ]
76
2017-11-22T22:00:00.000Z
2022-03-28T05:58:57.000Z
# Copyright 2021 Sony Corporation. # Copyright 2021 Sony Group Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_args(batch_size=8, image_size=256, max_iter=100000): """ Get command line arguments. Arguments set the default values of command line arguments. """ import argparse import os description = "Example of Lightweight GAN." parser = argparse.ArgumentParser(description) parser.add_argument("-d", "--device-id", type=str, default="0", help="Device id.") parser.add_argument("-c", "--context", type=str, default="cudnn", help="Context.") parser.add_argument("--type-config", "-t", type=str, default='float', help='Type of computation. e.g. "float", "half".') parser.add_argument("--img-path", type=str, default="~/AnimalFace-dog", help="Image path.") parser.add_argument("--image-size", type=int, default=image_size, help="Image size.") parser.add_argument("--batch-size", "-b", type=int, default=batch_size, help="Batch size.") parser.add_argument("--max-iter", "-i", type=int, default=max_iter, help="Max iterations.") parser.add_argument("--save-interval", type=int, default=50000, help="Interval for saving models.") parser.add_argument("--test-interval", type=int, default=5000, help="Interval for testing models.") parser.add_argument("--latent", type=int, default=256, help="Number of latent variables.") parser.add_argument("--monitor-path", type=str, default="./result/tmp", help="Monitor path.") parser.add_argument("--model-load-path", type=str, default=".", help="Path to load parameters from") parser.add_argument("--train-samples", type=int, default=-1, help="Number of data to be used. When -1 is set all data is used.") parser.add_argument("--lr", type=float, default=2e-4, help="Learning rate") parser.add_argument("--aug-list", nargs="+", default=["lrflip", "translation", "color"]) args = parser.parse_args() return args
42.512821
91
0.606454
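A short usage sketch for the parser above; the import path assumes the script sits next to args.py in the slegan directory, and the values shown are just the defaults defined there.

from args import get_args  # the file shown above

if __name__ == "__main__":
    args = get_args(batch_size=16)  # programmatic override of the default batch size
    # equivalent CLI: python train.py -b 16 -c cudnn --img-path ~/AnimalFace-dog
    print(args.context, args.batch_size, args.monitor_path)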
be077745c0ef294c19a02fb08ff66ab17f79fb99
898
py
Python
day1/files_ex1.py
grenn72/pynet-ons-feb19
5aff7dfa6a697214dc24818819a60b46a261d0d3
[ "Apache-2.0" ]
null
null
null
day1/files_ex1.py
grenn72/pynet-ons-feb19
5aff7dfa6a697214dc24818819a60b46a261d0d3
[ "Apache-2.0" ]
null
null
null
day1/files_ex1.py
grenn72/pynet-ons-feb19
5aff7dfa6a697214dc24818819a60b46a261d0d3
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python from __future__ import print_function # READ #### f = open("my_file.txt") print("\nLoop directly over file") print("-" * 60) for line in f: print(line.strip()) print("-" * 60) f.seek(0) my_content = f.readlines() print("\nUse readlines method") print("-" * 60) for line in my_content: print(line.strip()) print("-" * 60) f.seek(0) my_content = f.read() print("\nUse read + splitlines") print("-" * 60) for line in my_content.splitlines(): print(line) print("-" * 60) f.close() with open("my_file.txt") as f: print("\nUse with and loop over file") print("-" * 60) for line in f: print(line.strip()) print("-" * 60) # WRITE #### print("\nWriting file.") f = open("new_file.txt", "w") f.write("whatever2\n") f.close() # APPEND #### print("\nAppending file.") with open("new_file.txt", "a") as f: f.write("something else\n") print()
18.708333
42
0.614699
be09ed482ae6fd03e6f106d0795f2a118eb2425c
2,332
py
Python
test/integration_tests/test_integration_datasets_client.py
self-host/selfhost-python-client
95797ef819099174d916b10e82878c370b1cd972
[ "MIT" ]
null
null
null
test/integration_tests/test_integration_datasets_client.py
self-host/selfhost-python-client
95797ef819099174d916b10e82878c370b1cd972
[ "MIT" ]
null
null
null
test/integration_tests/test_integration_datasets_client.py
self-host/selfhost-python-client
95797ef819099174d916b10e82878c370b1cd972
[ "MIT" ]
null
null
null
import uuid from typing import List, Dict, Any import unittest from selfhost_client import SelfHostClient, DatasetType
36.4375
100
0.653945
be09ff199c76d0416c7ca2377918a44850900a71
909
py
Python
setup.py
pnxenopoulos/soccer-data-gen
bdc31be973eb12cdd9f58b04ab61ea9d5d1aa7a5
[ "MIT" ]
null
null
null
setup.py
pnxenopoulos/soccer-data-gen
bdc31be973eb12cdd9f58b04ab61ea9d5d1aa7a5
[ "MIT" ]
null
null
null
setup.py
pnxenopoulos/soccer-data-gen
bdc31be973eb12cdd9f58b04ab61ea9d5d1aa7a5
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages

setup(
    name="soccergen",
    version="0.1",
    packages=find_packages(),
    # Project uses reStructuredText, so ensure that the docutils get
    # installed or upgraded on the target machine
    install_requires=["gfootball>=2.8",],
    # metadata to display on PyPI
    author="Peter Xenopoulos",
    author_email="[email protected]",
    description="Soccer trajectory and event data generation",
    keywords="soccer data-generation football",
    url="https://github.com/pnxenopoulos/soccer-data-gen",  # project home page, if any
    project_urls={
        "Issues": "https://github.com/pnxenopoulos/soccer-data-gen/issues",
        "Documentation": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
        "Github": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
    },
    classifiers=["License :: OSI Approved :: MIT License"],
)
39.521739
87
0.693069
be0a74b4d28b5ee5afbbd8993134c1568bbdff10
6,516
py
Python
metaspace/engine/sm/engine/tests/test_fdr.py
METASPACE2020/METASPACE
e1acd9a409f84a78eed7ca9713258c09b0e137ca
[ "Apache-2.0" ]
null
null
null
metaspace/engine/sm/engine/tests/test_fdr.py
METASPACE2020/METASPACE
e1acd9a409f84a78eed7ca9713258c09b0e137ca
[ "Apache-2.0" ]
null
null
null
metaspace/engine/sm/engine/tests/test_fdr.py
METASPACE2020/METASPACE
e1acd9a409f84a78eed7ca9713258c09b0e137ca
[ "Apache-2.0" ]
null
null
null
from itertools import product
from unittest.mock import patch

import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal

from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers

FDR_CONFIG = {'decoy_sample_size': 2}


def test_estimate_fdr_digitize_works():
    fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=[],
        neutral_losses=[],
        target_adducts=['+H'],
        analysis_version=1,
    )

    fdr.fdr_levels = [0.4, 0.8]
    fdr.td_df = pd.DataFrame(
        [['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
        columns=['formula', 'tm', 'dm'],
    )

    msm_df = pd.DataFrame(
        [
            ['C1', '+H', 1.0],
            ['C2', '+H', 0.75],
            ['C3', '+H', 0.5],
            ['C4', '+H', 0.25],
            ['C1', '+Cu', 0.75],
            ['C2', '+Ag', 0.3],
            ['C3', '+Cl', 0.25],
            ['C4', '+Co', 0.1],
        ],
        columns=['formula', 'modifier', 'msm'],
    )
    exp_sf_df = pd.DataFrame(
        [
            ['C1', '+H', 1.0, 0.4],
            ['C2', '+H', 0.75, 0.4],
            ['C3', '+H', 0.5, 0.4],
            ['C4', '+H', 0.25, 0.8],
        ],
        columns=['formula', 'modifier', 'msm', 'fdr'],
    )

    assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)


def test_ions():
    formulas = ['H2O', 'C5H2OH']
    target_adducts = ['+H', '+Na']
    decoy_sample_size = 5

    fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=[],
        neutral_losses=[],
        target_adducts=target_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    assert type(ions) == list
    # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
    assert (
        len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
        < len(ions)
        <= len(formulas) * len(target_adducts) * decoy_sample_size + len(formulas) * len(target_adducts)
    )
    target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
    assert set(target_ions).issubset(set(map(tuple, ions)))


def test_chem_mods_and_neutral_losses():
    formulas = ['H2O', 'C5H2OH']
    chem_mods = ['-H+C']
    neutral_losses = ['-O', '-C']
    target_adducts = ['+H', '+Na', '[M]+']
    target_modifiers = [
        format_modifiers(cm, nl, ta)
        for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
    ]
    decoy_sample_size = 5

    fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
    fdr = FDR(
        fdr_config=fdr_config,
        chem_mods=chem_mods,
        neutral_losses=neutral_losses,
        target_adducts=target_adducts,
        analysis_version=1,
    )
    fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
    ions = fdr.ion_tuples()

    assert type(ions) == list
    # total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
    min_count = len(formulas) * len(target_modifiers)
    max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
    assert min_count < len(ions) <= max_count
    target_ions = list(product(formulas, target_modifiers))
    assert set(target_ions).issubset(set(map(tuple, ions)))


def test_run_fdr_ranking():
    target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
    decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
    n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
    n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
    expected_fdr = n_decoys / n_targets
    expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)
    expected_fdr_mono = pd.Series(
        [0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
    )

    fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
    fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)
    fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)

    assert np.isclose(fdr, expected_fdr).all()
    assert np.isclose(fdr_ros, expected_fdr_ros).all()
    assert np.isclose(fdr_mono, expected_fdr_mono).all()
32.58
121
0.558778
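test_run_fdr_ranking above pins down the ranking math: the FDR at each target score is (decoys scoring at least as high) / (targets scoring at least as high), with a rule-of-succession variant (d + 1) / (t + 1) and a monotonic variant that takes a running minimum from the bottom of the ranking upward. A hedged sketch that reproduces those expected values; the real run_fdr_ranking lives in sm.engine.annotation.fdr and may differ in detail.

import numpy as np
import pandas as pd

def fdr_ranking_sketch(target_scores, decoy_scores, decoy_ratio=1.0,
                       rule_of_succession=False, monotonic=False):
    """FDR per target score; assumes target_scores are sorted descending."""
    t = np.asarray(target_scores, dtype=float)
    d = np.sort(np.asarray(decoy_scores, dtype=float))
    n_targets = np.arange(1, len(t) + 1)
    n_decoys = len(d) - np.searchsorted(d, t, side='left')  # decoys scoring >= each target
    if rule_of_succession:
        fdr = (n_decoys / decoy_ratio + 1) / (n_targets + 1)
    else:
        fdr = (n_decoys / decoy_ratio) / n_targets
    if monotonic:
        fdr = np.minimum.accumulate(fdr[::-1])[::-1]  # running minimum, bottom-up
    return pd.Series(fdr)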
be0b585df12c7b4d77e31edbf4786b2ef1e4a31b
69
py
Python
tests/__init__.py
acarl005/plotille
44089a88f20b71b3314416947ae724bebbdc7739
[ "MIT" ]
2
2020-04-08T15:31:12.000Z
2020-07-01T11:04:47.000Z
tests/__init__.py
acarl005/plotille
44089a88f20b71b3314416947ae724bebbdc7739
[ "MIT" ]
9
2018-09-12T09:29:43.000Z
2020-03-15T09:11:25.000Z
tests/__init__.py
acarl005/plotille
44089a88f20b71b3314416947ae724bebbdc7739
[ "MIT" ]
1
2019-03-29T10:59:13.000Z
2019-03-29T10:59:13.000Z
from logging import getLogger getLogger('flake8').propagate = False
17.25
37
0.797101
be0c9d39fc49b73642a31f8fb89de4fff31f8d63
4,576
py
Python
umigame/nlp/labelling.py
penguinwang96825/Umigame
98d647ab6f40df08fe31d6b3bc444afe229a914e
[ "Apache-2.0" ]
null
null
null
umigame/nlp/labelling.py
penguinwang96825/Umigame
98d647ab6f40df08fe31d6b3bc444afe229a914e
[ "Apache-2.0" ]
null
null
null
umigame/nlp/labelling.py
penguinwang96825/Umigame
98d647ab6f40df08fe31d6b3bc444afe229a914e
[ "Apache-2.0" ]
1
2021-11-01T14:35:32.000Z
2021-11-01T14:35:32.000Z
import math
import numpy as np
import pandas as pd


def fixed_time_horizon(df, column='close', lookback=20):
    """
    Fixed-time Horizon
    As it relates to finance, virtually all ML papers label observations using the fixed-time horizon method.
    Fixed-time horizon is presented as one of the main procedures to label data when it comes to processing
    financial time series for machine learning.

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    lookback: int
        The number of days to look ahead.

    References
    ----------
    1. https://mlfinlab.readthedocs.io/en/latest/labeling/labeling_fixed_time_horizon.html
    2. https://arxiv.org/pdf/1603.08604.pdf
    3. https://quantdare.com/4-simple-ways-to-label-financial-data-for-machine-learning/
    4. De Prado, Advances in financial machine learning, 2018
    5. Dixon et al., Classification-based financial markets prediction using deep neural networks, 2017
    """
    price = df[column]
    label = (price.shift(-lookback) / price > 1).astype(int)
    return label


def triple_barrier(df, column='close', ub=0.07, lb=0.03, lookback=20, binary_classification=True):
    """
    Triple Barrier
    The idea is to consider the full dynamics of a trading strategy and not a simple performance proxy. The
    rationale for this extension is that often money managers implement P&L triggers that cash in when gains are
    sufficient or opt out to stop their losses. Upon inception of the strategy, three barriers are fixed (De Prado, 2018).

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    ub: float
        It stands for upper bound, e.g. 0.07 is a 7% profit taking.
    lb: float
        It stands for lower bound, e.g. 0.03 is a 3% stop loss.
    lookback: int
        Maximum holding time.

    References
    ----------
    1. https://www.finlab.tw/generate-labels-stop-loss-stop-profit/
    2. http://www.mlfactor.com/Data.html#the-triple-barrier-method
    3. https://chrisconlan.com/calculating-triple-barrier-labels-from-advances-in-financial-machine-learning/
    4. https://towardsdatascience.com/financial-machine-learning-part-1-labels-7eeed050f32e
    5. De Prado, Advances in financial machine learning, 2018
    """
    ub = 1 + ub
    lb = 1 - lb

    r = np.array(range(lookback))

    # Window helpers in the style of the finlab reference (1.): relative price at the
    # first barrier touch inside the window, and the offset of that touch.
    def end_price(s):
        return np.append(s[(s / s[0] > ub) | (s / s[0] < lb)], s[-1])[0] / s[0]

    def end_time(s):
        return np.append(r[(s / s[0] > ub) | (s / s[0] < lb)], lookback - 1)[0]

    price = df[column]
    p = price.rolling(lookback).apply(end_price, raw=True).shift(-lookback+1)
    t = price.rolling(lookback).apply(end_time, raw=True).shift(-lookback+1)
    t = pd.Series(
        [t.index[int(k+i)] if not math.isnan(k+i) else np.datetime64('NaT') for i, k in enumerate(t)],
        index=t.index
    ).dropna()

    label = pd.Series(0, p.index)
    label.loc[p > ub] = 1
    label.loc[p < lb] = -1
    if binary_classification:
        label = np.where(label == 1, 1, 0)

    return pd.Series(label, index=price.index)


def get_continuous_trading_signals(df, column='close', lookahead=5):
    """
    Continuous Trading Signal
    A hybrid stock trading framework integrating technical analysis with machine learning techniques.

    Parameters
    ----------
    df: pd.DataFrame
    column: str
        Choose from "open", "high", "low", and "close."
    lookahead: int
        The number of days to look ahead.

    References
    ----------
    1. https://translateyar.ir/wp-content/uploads/2020/05/1-s2.0-S2405918815300179-main-1.pdf
    2. Dash and Dash, A hybrid stock trading framework integrating technical analysis with machine learning techniques, 2016
    """
    price = df[column]
    OTr = []
    trends = []
    for idx in range(len(price)-lookahead+1):
        arr_window = price.iloc[idx:(idx+lookahead)]
        if price.iloc[idx+lookahead-1] > price.iloc[idx]:
            coef = (price.iloc[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
            y_t = coef * 0.5 + 0.5
        elif price.iloc[idx+lookahead-1] <= price.iloc[idx]:
            coef = (price.iloc[idx+lookahead-1]-min(arr_window)) / (max(arr_window)-min(arr_window))
            y_t = coef * 0.5
        OTr.append(y_t)
    OTr = np.append(OTr, np.zeros(shape=(len(price)-len(OTr))))
    trends = (OTr >= np.mean(OTr)).astype(int)
    return pd.Series(OTr, index=price.index), pd.Series(trends, index=price.index)
37.508197
124
0.647072
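A toy run of the labelling helpers above on a synthetic price series; the numbers are illustrative only and assume the window helpers defined inside triple_barrier.

import numpy as np
import pandas as pd

rng = np.random.default_rng(42)
idx = pd.date_range("2021-01-01", periods=250, freq="D")
close = pd.Series(100 * np.exp(rng.normal(0, 0.01, 250).cumsum()), index=idx)
df = pd.DataFrame({"close": close})

y_fth = fixed_time_horizon(df, "close", lookback=20)  # 1 if price is higher 20 days out
y_tb = triple_barrier(df, "close", ub=0.07, lb=0.03, lookback=20)  # 1 only if +7% is hit before -3%
print(int(y_fth.sum()), int(y_tb.sum()))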
be0d1242d33adfcfc290ba70e3637aa993c895e3
4,164
py
Python
mayan/apps/converter/api.py
Dave360-crypto/mayan-edms
9cd37537461347f79ff0429e4b8b16fd2446798d
[ "Apache-2.0" ]
3
2020-02-03T11:58:51.000Z
2020-10-20T03:52:21.000Z
mayan/apps/converter/api.py
Dave360-crypto/mayan-edms
9cd37537461347f79ff0429e4b8b16fd2446798d
[ "Apache-2.0" ]
null
null
null
mayan/apps/converter/api.py
Dave360-crypto/mayan-edms
9cd37537461347f79ff0429e4b8b16fd2446798d
[ "Apache-2.0" ]
2
2020-10-24T11:10:06.000Z
2021-03-03T20:05:38.000Z
from __future__ import absolute_import import hashlib import logging import os from django.utils.encoding import smart_str from common.conf.settings import TEMPORARY_DIRECTORY from common.utils import fs_cleanup from .exceptions import OfficeConversionError, UnknownFileFormat from .literals import (DEFAULT_PAGE_NUMBER, DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT) from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE, TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR, FILE_FORMATS) from .runtime import backend, office_converter HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest() logger = logging.getLogger(__name__)
32.53125
180
0.68828
be0d8286d98d561dd73b8ad4757e80b16c93f068
2,798
py
Python
LogisticRegression/learn.py
ValYouW/DeepLearningCourse
d7d9edc60075f9078ec3f41074c958eaa7854964
[ "MIT" ]
null
null
null
LogisticRegression/learn.py
ValYouW/DeepLearningCourse
d7d9edc60075f9078ec3f41074c958eaa7854964
[ "MIT" ]
null
null
null
LogisticRegression/learn.py
ValYouW/DeepLearningCourse
d7d9edc60075f9078ec3f41074c958eaa7854964
[ "MIT" ]
null
null
null
import numpy as np import pandas as pd import matplotlib.pyplot as plt import utils if __name__ == '__main__': main()
32.534884
105
0.605075
be0d8c6e88406117103733f22d2fc8dd5f14eae8
30,231
py
Python
ignite/handlers/time_profilers.py
iamhardikat11/ignite
0666b407f7cdba81842014c6026e33b66113bb94
[ "BSD-3-Clause" ]
4,119
2017-11-23T18:10:37.000Z
2022-03-31T05:31:27.000Z
ignite/handlers/time_profilers.py
iamhardikat11/ignite
0666b407f7cdba81842014c6026e33b66113bb94
[ "BSD-3-Clause" ]
1,838
2017-11-24T11:19:25.000Z
2022-03-31T09:08:18.000Z
ignite/handlers/time_profilers.py
iamhardikat11/ignite
0666b407f7cdba81842014c6026e33b66113bb94
[ "BSD-3-Clause" ]
691
2017-11-24T10:57:33.000Z
2022-03-29T02:19:44.000Z
import functools from collections import OrderedDict from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast import torch from ignite.engine import Engine, EventEnum, Events from ignite.handlers.timing import Timer
38.412961
119
0.582978
be0e7ba87c886d267ec11352e01c184c5af3e8dc
9,671
py
Python
bellmanford.py
asmodehn/aiokraken
b260bd41d5aa091e6a4f1818328426fbe6f625c0
[ "MIT" ]
null
null
null
bellmanford.py
asmodehn/aiokraken
b260bd41d5aa091e6a4f1818328426fbe6f625c0
[ "MIT" ]
82
2019-08-30T09:37:49.000Z
2022-03-29T14:53:22.000Z
bellmanford.py
asmodehn/aiokraken
b260bd41d5aa091e6a4f1818328426fbe6f625c0
[ "MIT" ]
null
null
null
""" Bellman Ford Arbitrage implementation over websocket API. """ from __future__ import annotations from collections import namedtuple from datetime import datetime from decimal import Decimal from math import log import pandas as pd import numpy as np import asyncio import typing from aiokraken.model.assetpair import AssetPair from aiokraken.rest import AssetPairs, Assets from aiokraken.model.asset import Asset from aiokraken.rest.client import RestClient from aiokraken.websockets.publicapi import ticker import networkx as nx client = RestClient() def test_pricematrix_mapping(): # testing with string for simplicity for now pm = PriceMatrix(["EUR", "BTC"]) pm["EUR"]["BTC"] = Decimal(1.234) pm["BTC"]["EUR"] = Decimal(4.321) assert pm["EUR"]["BTC"] == Decimal(1.234) assert pm["BTC"]["EUR"] == Decimal(4.321) if __name__ == '__main__': asyncio.run(arbiter(user_assets=["XTZ", "ETH", "XBT", "EUR"]), debug=True)
39.798354
156
0.58722
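The core check this module builds toward can be sketched in a few lines: put -log(rate) on each directed edge so that a profitable loop (product of rates > 1) becomes a negative cycle, then run Bellman-Ford. The rates and pairs below are made up, and the PriceMatrix/arbiter pieces of the truncated record are not used here.

import math
import networkx as nx

rates = {
    ("EUR", "XBT"): 0.000031, ("XBT", "EUR"): 32100.0,
    ("XBT", "ETH"): 15.2, ("ETH", "XBT"): 0.0657,
    ("ETH", "EUR"): 2110.0, ("EUR", "ETH"): 0.000473,
}

g = nx.DiGraph()
for (src, dst), rate in rates.items():
    # log-space weight: a cycle with total weight < 0 means the product of rates > 1
    g.add_edge(src, dst, weight=-math.log(rate))

try:
    cycle = nx.find_negative_cycle(g, "EUR")  # available in networkx >= 2.6
    print("arbitrage loop:", cycle)
except nx.NetworkXError:
    print("no arbitrage at these rates")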