Dataset schema (one row per source file; "⌀" marked a nullable column in the original dump):

| column | dtype | range / classes | nullable |
|---|---|---|---|
| hexsha | string | length 40 | no |
| size | int64 | 5 to 2.06M | no |
| ext | string | 11 classes | no |
| lang | string | 1 class | no |
| max_stars_repo_path | string | length 3 to 251 | no |
| max_stars_repo_name | string | length 4 to 130 | no |
| max_stars_repo_head_hexsha | string | length 40 to 78 | no |
| max_stars_repo_licenses | sequence | length 1 to 10 | no |
| max_stars_count | int64 | 1 to 191k | yes |
| max_stars_repo_stars_event_min_datetime | string | length 24 | yes |
| max_stars_repo_stars_event_max_datetime | string | length 24 | yes |
| max_issues_repo_path | string | length 3 to 251 | no |
| max_issues_repo_name | string | length 4 to 130 | no |
| max_issues_repo_head_hexsha | string | length 40 to 78 | no |
| max_issues_repo_licenses | sequence | length 1 to 10 | no |
| max_issues_count | int64 | 1 to 116k | yes |
| max_issues_repo_issues_event_min_datetime | string | length 24 | yes |
| max_issues_repo_issues_event_max_datetime | string | length 24 | yes |
| max_forks_repo_path | string | length 3 to 251 | no |
| max_forks_repo_name | string | length 4 to 130 | no |
| max_forks_repo_head_hexsha | string | length 40 to 78 | no |
| max_forks_repo_licenses | sequence | length 1 to 10 | no |
| max_forks_count | int64 | 1 to 105k | yes |
| max_forks_repo_forks_event_min_datetime | string | length 24 | yes |
| max_forks_repo_forks_event_max_datetime | string | length 24 | yes |
| content | string | length 1 to 1.05M | no |
| avg_line_length | float64 | 1 to 1.02M | no |
| max_line_length | int64 | 3 to 1.04M | no |
| alphanum_fraction | float64 | 0 to 1 | no |
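As a quick, illustrative sketch (not part of the dump itself), rows with this layout can be loaded and filtered with pandas; the parquet file name below is a placeholder assumption, not a path given anywhere in the source:

```python
import pandas as pd

# Hypothetical local shard of the dataset; the file name is a placeholder.
df = pd.read_parquet("data-00000-of-00001.parquet")

# All column names come from the schema table above.
subset = df[(df["size"] < 10_000) & (df["max_stars_count"].notna())]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "avg_line_length"]].head())
```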
863b8cb7916efe3b96226a5e47386ab45aa3a5f0 | 9,162 | py | Python | examples/solar/p25_nonsparse_cmmgp.py | axdahl/SC-MMGP @ c6cd9d9de66bb7074925a4b6485f10a74bdd9f68 | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
"""
Script to execute example covarying MMGP regression forecasting model
with full Krhh.
Inputs: Data training and test sets (dictionary pickle)
Data for example:
- normalised solar data for 25 sites for 15 minute forecast
- N_train = 4200, N_test = 2276, P = 25, D = 51
- Xtr[:, :50] 2 recent lagged observations for each site in order
- Xtr[:, 50] time index
- link inputs is a 25x2 array (link inputs repeated for every group)
with normalised lat,long for each site in order
Model Options:
- Sparse or full x-function covariance prior Krhh (set bool SPARSE_PRIOR)
- Diagonal or Kronecker-structured variational posterior covariance Sr (set bool DIAG_POST)
- Sparse or full posterior covariance (when Kronecker posterior; set bool SPARSE_POST)
Current Settings (sparse covarying mmgp model with sparse Kronecker posterior):
DIAG_POST = False
SPARSE_PRIOR = False # set True for equivalent sparse scmmgp model
SPARSE_POST = True
Note on specifying group structure for F:
Grouping occurs via block_struct, a nested list of grouping order
Where functions [i] are independent i.e. in own block, set link_kernel[i] = link_inputs[i] = 1.0
See model class preamble and example below for further details.
"""
import os
import numpy as np
import pickle
import pandas as pd
import traceback
import time
import sklearn.cluster
import csv
import sys
import mmgp
from mmgp import likelihoods
from mmgp import kernels
import tensorflow as tf
from mmgp import datasets
from mmgp import losses
from mmgp import util
dpath = '/experiments/datasets/'
dfile = 'p25_inputsdict.pickle'
dlinkfile = 'p25_linkinputsarray.pickle'
outdir = '/experiments/results/p25_nonsparse_cmmgp/'
try:
os.makedirs(outdir)
except FileExistsError:
pass
def get_inputs():
"""
inputsdict contains {'Yte': Yte, 'Ytr': Ytr, 'Xtr': Xtr, 'Xte': Xte} where values are np.arrays
NumPy arrays are truncated to evenly split into batches of size = batchsize
returns inputsdict, Xtr_link (ndarray, shape = [P, D_link_features])
"""
with open(os.path.join(dpath, dfile), 'rb') as f:
d_all = pickle.load(f)
with open(os.path.join(dpath, dlinkfile), 'rb') as f:
d_link = pickle.load(f)
return d_all, d_link
FLAGS = util.util.get_flags()
BATCH_SIZE = FLAGS.batch_size
LEARNING_RATE = FLAGS.learning_rate
DISPLAY_STEP = FLAGS.display_step
EPOCHS = FLAGS.n_epochs
NUM_SAMPLES = FLAGS.mc_train
PRED_SAMPLES = FLAGS.mc_test
NUM_INDUCING = FLAGS.n_inducing
NUM_COMPONENTS = FLAGS.num_components
IS_ARD = FLAGS.is_ard
TOL = FLAGS.opt_tol
VAR_STEPS = FLAGS.var_steps
DIAG_POST = False
SPARSE_PRIOR = False
SPARSE_POST = True # option for non-diag post
MAXTIME = 1200
print("settings done")
# define GPRN P and Q
output_dim = 25 #P
node_dim = 25 #Q
lag_dim = 2
save_nlpds = False # If True saves samples of nlpds for n,p,s
# extract dataset
d, d_link = get_inputs()
Ytr, Yte, Xtr, Xte = d['Ytr'], d['Yte'], d['Xtr'], d['Xte']
data = datasets.DataSet(Xtr.astype(np.float32), Ytr.astype(np.float32), shuffle=False)
test = datasets.DataSet(Xte.astype(np.float32), Yte.astype(np.float32), shuffle=False)
print("dataset created")
# model config block rows (where P=Q): block all w.1, w.2 etc, leave f independent
# order of block_struct is rows, node functions
# lists required: block_struct, link_inputs, kern_link, kern
#block_struct nested list of grouping order
weight_struct = [[] for _ in range(output_dim)]
for i in range(output_dim):
row = list(range(i, i+output_dim*(node_dim-1)+1, output_dim))
row_0 = row.pop(i) # bring diag to pivot position
weight_struct[i] = [row_0] + row
nodes = [[x] for x in list(range(output_dim * node_dim, output_dim * node_dim + output_dim))]
block_struct = weight_struct + nodes
# create link inputs (link inputs used repeatedly but can have link input per group)
# permute to bring diagonal to first position
link_inputs = [[] for _ in range(output_dim)]
for i in range(output_dim):
idx = list(range(d_link.shape[0]))
link_inputs[i] = d_link[[idx.pop(i)] + idx, :]
link_inputs = link_inputs + [1.0 for i in range(output_dim)] # for full W row blocks, independent nodes
# create 'between' kernel list
klink_rows = [kernels.CompositeKernel('mul',[kernels.RadialBasis(2, std_dev=2.0, lengthscale=1.0, white=0.01, input_scaling = IS_ARD),
kernels.CompactSlice(2, active_dims=[0,1], lengthscale = 2.0, input_scaling = IS_ARD)] )
for i in range(output_dim) ]
klink_f = [1.0 for i in range(node_dim)]
kernlink = klink_rows + klink_f
# create 'within' kernel
# kern
lag_active_dims_s = [ [] for _ in range(output_dim)]
for i in range(output_dim):
lag_active_dims_s[i] = list(range(lag_dim*i, lag_dim*(i+1)))
k_rows = [kernels.CompositeKernel('mul',[kernels.RadialBasisSlice(lag_dim, active_dims=lag_active_dims_s[i],
std_dev = 1.0, white = 0.01, input_scaling = IS_ARD),
kernels.PeriodicSliceFixed(1, active_dims=[Xtr.shape[1]-1],
lengthscale=0.5, std_dev=1.0, period = 144) ])
for i in range(output_dim)]
k_f = [kernels.RadialBasisSlice(lag_dim, active_dims=lag_active_dims_s[i], std_dev = 1.0, white = 0.01, input_scaling = IS_ARD)
for i in range(output_dim)]
kern = k_rows + k_f
print('len link_inputs ',len(link_inputs))
print('len kernlink ',len(kernlink))
print('len kern ', len(kern))
print('no. groups = ', len(block_struct), 'no. latent functions =', len([i for b in block_struct for i in b]))
print('number latent functions', node_dim*(output_dim+1))
likelihood = likelihoods.CovaryingRegressionNetwork(output_dim, node_dim, std_dev = 0.2) # p, q, lik_noise
print("likelihood and kernels set")
Z = init_z(data.X, NUM_INDUCING)
print('inducing points set')
m = mmgp.ExplicitSCMMGP(output_dim, likelihood, kern, kernlink, block_struct, Z, link_inputs,
num_components=NUM_COMPONENTS, diag_post=DIAG_POST, sparse_prior=SPARSE_PRIOR,
sparse_post=SPARSE_POST, num_samples=NUM_SAMPLES, predict_samples=PRED_SAMPLES)
print("model set")
# initialise losses and logging
error_rate = losses.RootMeanSqError(data.Dout)
os.chdir(outdir)
with open("log_results.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'fit_runtime', 'nelbo', error_rate.get_name(),'generalised_nlpd'])
with open("log_params.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'raw_kernel_params', 'raw_kernlink_params', 'raw_likelihood_params', 'raw_weights'])
with open("log_comp_time.csv", 'w', newline='') as f:
csv.writer(f).writerow(['epoch', 'batch_time', 'nelbo_time', 'pred_time', 'gen_nlpd_time', error_rate.get_name()+'_time'])
# optimise
o = tf.train.AdamOptimizer(LEARNING_RATE, beta1=0.9,beta2=0.99)
print("start time = ", time.strftime('%X %x %Z'))
m.fit(data, o, var_steps = VAR_STEPS, epochs = EPOCHS, batch_size = BATCH_SIZE, display_step=DISPLAY_STEP,
test = test, loss = error_rate, tolerance = TOL, max_time=MAXTIME )
print("optimisation complete")
# export final predicted values and loss metrics
ypred = m.predict(test.X, batch_size = BATCH_SIZE) #same batchsize used for convenience
np.savetxt("predictions.csv", np.concatenate(ypred, axis=1), delimiter=",")
if save_nlpds == True:
nlpd_samples, nlpd_meanvar = m.nlpd_samples(test.X, test.Y, batch_size = BATCH_SIZE)
try:
np.savetxt("nlpd_meanvar.csv", nlpd_meanvar, delimiter=",") # N x 2P as for predictions
except:
print('nlpd_meanvar export fail')
try:
np.savetxt("nlpd_samples.csv", nlpd_samples, delimiter=",") # NP x S (NxS concat for P tasks)
except:
print('nlpd_samples export fail')
print("Final " + error_rate.get_name() + "=" + "%.4f" % error_rate.eval(test.Y, ypred[0]))
print("Final " + "generalised_nlpd" + "=" + "%.4f" % m.nlpd_general(test.X, test.Y, batch_size = BATCH_SIZE))
error_rate_end = [losses.MeanAbsError(data.Dout)] # any extra accuracy measures at end of routine
print("Final ", [e.get_name() for e in error_rate_end])
print([e.eval(test.Y, ypred[0]) for e in error_rate_end])
predvar = [np.mean(np.mean(ypred[1]))]
print("Final predvar ", predvar)
with open("final_losses.csv", 'w', newline='') as f:
csv.writer(f).writerows([[e.get_name() for e in error_rate_end] + ['pred_var'],
[e.eval(test.Y, ypred[0]) for e in error_rate_end] + predvar])
print("finish time = " + time.strftime('%X %x %Z'))
avg_line_length: 39.662338 | max_line_length: 135 | alphanum_fraction: 0.685986
863bc130193c72b67b65cca8c77628d45f1d2148 | 11,072 | py | Python | cruiser-lib/test/positioning/test_position_hl_commander.py | cfreebuf/kubeedge-examples @ 9bf4549b5f64390ca4da291745b2a66a8e3f006e | licenses: ["MIT"] | stars: null | issues: null | forks: 1 (2019-12-02T01:00:18.000Z to 2019-12-02T01:00:18.000Z)
# -*- coding: utf-8 -*-
#
# || ____ _ __
# +------+ / __ )(_) /_______________ _____ ___
# | 0xBC | / __ / / __/ ___/ ___/ __ `/_ / / _ \
# +------+ / /_/ / / /_/ /__/ / / /_/ / / /_/ __/
# || || /_____/_/\__/\___/_/ \__,_/ /___/\___/
#
# Copyright (C) 2018 Bitcraze AB
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
import math
import sys
import unittest
from cflib.crazyflie import Crazyflie
from cflib.crazyflie import HighLevelCommander
from cflib.crazyflie import Param
from cflib.positioning.position_hl_commander import PositionHlCommander
if sys.version_info < (3, 3):
from mock import MagicMock, patch, call
else:
from unittest.mock import MagicMock, patch, call
if __name__ == '__main__':
unittest.main()
avg_line_length: 29.525333 | max_line_length: 74 | alphanum_fraction: 0.587157
863c0ed7e6b8dca169d56f2c58a602b033d4bb29 | 6,232 | py | Python | onmt/keyphrase/pke/unsupervised/graph_based/expandrank.py | NaomiatLibrary/OpenNMT-kpg-release @ 1da3468d7dad22529a77f3526abf9b373bd3dc4c | licenses: ["MIT"] | stars: 152 (2019-10-07T03:15:53.000Z to 2022-03-24T16:26:26.000Z) | issues: 46 (2019-11-04T09:51:51.000Z to 2022-03-06T18:40:13.000Z) | forks: 28 (2019-11-04T02:02:23.000Z to 2021-12-29T06:10:04.000Z)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Florian Boudin
# Date: 10-02-2018
"""ExpandRank keyphrase extraction model.
Graph-based ranking approach to keyphrase extraction described in:
* Xiaojun Wan and Jianguo Xiao.
Single Document Keyphrase Extraction Using Neighborhood Knowledge.
*In proceedings of AAAI*, pages 855-860, 2008.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from onmt.keyphrase.pke.unsupervised import SingleRank
from onmt.keyphrase.pke.base import LoadFile
import networkx as nx
import logging
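# The docstring above summarises Wan & Xiao's ExpandRank; the class body is not
# included in this excerpt. Below is a minimal, assumption-laden sketch of the
# core idea (a co-occurrence graph over the target document plus
# similarity-weighted neighbour documents, ranked with PageRank). It is not the
# pke API; names and signature are hypothetical. It relies on the
# `import networkx as nx` above.
def expandrank_word_scores(docs, weights, window=2):
    # docs: token lists, with docs[0] the target document;
    # weights: one weight per document, e.g. cosine similarity to docs[0].
    g = nx.Graph()
    for doc, w in zip(docs, weights):
        for i, u in enumerate(doc):
            for v in doc[i + 1:i + window]:
                if u != v:
                    # accumulate edge weight across all documents
                    prev = g.get_edge_data(u, v, default={"weight": 0.0})["weight"]
                    g.add_edge(u, v, weight=prev + w)
    return nx.pagerank(g, weight="weight")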
avg_line_length: 37.542169 | max_line_length: 80 | alphanum_fraction: 0.574294
863da6290f58b36a91968cc8314fc22813d11b6c | 210 | py | Python | 5-serverless-xray-stack/app.py | mmeidlinger/cdk-microservices-labs @ a646c05d4bb0950f9915f9b15f810e82ba8d4e9a | licenses: ["MIT-0"] | stars: 14 (2020-03-26T18:35:50.000Z to 2022-03-20T14:49:54.000Z) | issues: 1 (2021-11-23T01:20:13.000Z to 2021-11-23T01:20:13.000Z) | forks: 14 (2020-04-15T12:30:14.000Z to 2022-03-26T20:47:35.000Z)
#!/usr/bin/env python3
from aws_cdk import core
from fagate_serverless.fagate_serverless_stack import FagateServerlessStack
app = core.App()
FagateServerlessStack(app, "serverless-xray-stack")
app.synth()
avg_line_length: 17.5 | max_line_length: 75 | alphanum_fraction: 0.804762
863daa6816d11bde4f87896d1cc47d06ece1f0db | 5,066 | py | Python | dash/long_callback/managers/celery_manager.py | nickmelnikov82/dash @ e774908da770bee83f3213e0307c27ed8a40500e | licenses: ["MIT"] | stars: 17,143 (2015-07-14T17:19:05.000Z to 2022-03-31T10:03:39.000Z) | issues: 1,630 (2015-11-17T22:15:41.000Z to 2022-03-31T09:15:07.000Z) | forks: 1,970 (2015-07-12T07:05:14.000Z to 2022-03-30T19:58:09.000Z)
import json
import inspect
import hashlib
from _plotly_utils.utils import PlotlyJSONEncoder
from dash.long_callback.managers import BaseLongCallbackManager
avg_line_length: 34.937931 | max_line_length: 102 | alphanum_fraction: 0.651402
863dcb2e53c2cd1e93015fc8efa9a5e953801c7f | 2,027 | py | Python | libraries/botframework-connector/botframework/connector/token_api/_token_api_client.py | stars: 10 (2019-05-11T18:07:14.000Z to 2021-08-20T03:02:47.000Z) in victor-kironde/botbuilder-python @ e893d9b036d7cf33cf9c9afd1405450c354cdbcd ["MIT"] | issues: 13 (2020-09-05T11:06:05.000Z to 2020-10-29T05:01:19.000Z) in Fortune-Adekogbe/botbuilder-python @ 4e48c874c32a2a7fe7f27a7a1f825e2aa39466c4 ["MIT"] | forks: 18 (2019-08-19T12:11:00.000Z to 2021-10-12T09:36:27.000Z) in opsdroid/wheels-for-teams-connector @ c283bb5ab95a4c3d4023767d53d2686aa531f372 ["Apache-2.0"] (path: botframework_connector-4.11.0/botframework/connector/token_api/_token_api_client.py)
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.service_client import SDKClient
from msrest import Serializer, Deserializer
from ._configuration import TokenApiClientConfiguration
from .operations import BotSignInOperations
from .operations import UserTokenOperations
from . import models
avg_line_length: 36.196429 | max_line_length: 82 | alphanum_fraction: 0.683769
863df256ad1c4ecc4568ea47ce82f56d31bf2e46 | 69 | py | Python | soppi/sample.py | shikshan/soppi @ 007f654b0e9fe4bf7fc09e967615cb205a67dbaa | licenses: ["MIT"] | stars: null | issues: null | forks: null
# content of test_sample.py
avg_line_length: 17.25 | max_line_length: 27 | alphanum_fraction: 0.623188
863e8a2ed0006f7150de09f27d406b39ae986ad3 | 827 | py | Python | saleor/order/migrations/0081_auto_20200406_0456.py | licenses: ["CC-BY-4.0"] | stars: 15,337 (2015-01-12T02:11:52.000Z to 2021-10-05T19:19:29.000Z) in fairhopeweb/saleor @ 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | issues: 7,486 (2015-02-11T10:52:13.000Z to 2021-10-06T09:37:15.000Z) in fairhopeweb/saleor @ 9ac6c22652d46ba65a5b894da5f1ba5bec48c019 | forks: 5,864 (2015-01-16T14:52:54.000Z to 2021-10-05T23:01:15.000Z) in aminziadna/saleor @ 2e78fb5bcf8b83a6278af02551a104cfa555a1fb
# Generated by Django 3.0.4 on 2020-04-06 09:56
from django.db import migrations
from saleor.order import OrderStatus
avg_line_length: 25.060606 | max_line_length: 63 | alphanum_fraction: 0.665054
863f656903c4148e82b3b4fd5343ee724e111ab6 | 3,469 | py | Python | function/python/brightics/function/textanalytics/regex.py | stars: 202 (2018-10-23T04:37:35.000Z to 2022-01-27T05:51:10.000Z) in jhpark428/studio @ 539457b3026dda827c1b17b4cb851946e34e3b85 ["Apache-2.0"] | issues: 444 (2018-11-07T08:41:14.000Z to 2022-03-16T06:48:57.000Z) in sagarmk/studio @ 3bc547fdf85ae6be80c1b40916f9f5d31d2b3f75 ["MIT"] | forks: 99 (2018-11-08T04:12:13.000Z to 2022-03-30T05:36:27.000Z) in sagarmk/studio @ 3bc547fdf85ae6be80c1b40916f9f5d31d2b3f75 ["MIT"]
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.utils import check_required_parameters
from brightics.common.exception import BrighticsFunctionException
from .data import regex_format_dict
import re
avg_line_length: 40.811765 | max_line_length: 120 | alphanum_fraction: 0.643701
863fffaabccfedecd9149dc35acae6b9542aa04c | 8,500 | py | Python | bin/temperature_functions.py | travc/outbreak-reporter @ 0f03ca66993827ae1866d09e3cf5d9f6d4acb633 | licenses: ["MIT"] | stars: null | issues: 2 (2019-12-15T19:58:26.000Z to 2019-12-17T05:33:32.000Z) | forks: 1 (2022-03-04T01:36:38.000Z to 2022-03-04T01:36:38.000Z)
#!/usr/bin/env python3
import sys
import os
import logging
import numpy as np
import pandas as pd
import dateutil
# Function which computes BM (single sine method) degree day generation from temperature data
def compute_year_over_year_norm(in_dataframe,
start, end,
norm_start=None, norm_end=None,
freq='daily',
interp_method='linear',
norm_method='mean'):
"""
Parameters
----------
start: convertible to Datetime
start range of dates to output
end: convertible to Datetime
end range of dates to output
norm_start : convertible to Datetime or None
`None` will use in_dataframe.index[0]
norm_end : convertable to Datetime or None
if given (not None), output range does not include `norm_end` (it is half-open)
`None` will use in_dataframe.index[-1]
freq : {'daily', 'hourly'}
interp_method : str or None
`None` will skip resample and interpolation, so
`in_dataframe` must already be daily or hourly (depending on `freq`)!
norm_method : {'mean', 'median'}
"""
if freq == 'hourly':
hrs = 24
hrs_freq = '1h'
elif freq == 'daily':
hrs = 1
hrs_freq = '24h'
else:
raise ValueError("Invalid `freq` argument value: {}".format(freq))
if norm_start is None:
norm_start = in_dataframe.index[0]
if norm_end is None:
norm_end = in_dataframe.index[-1]
else:
norm_end = pd.to_datetime([norm_end])[0] - pd.Timedelta('1 second')
print('Computing using range:', norm_start, 'to', norm_end)
if interp_method is None: # skip resample+interpolation (assumes in_dataframe is daily!)
t = in_dataframe.loc[norm_start:norm_end]
else: # resample and interpolate to get hourly
t = in_dataframe.resample(hrs_freq).interpolate(method=interp_method).loc[norm_start:norm_end]
if norm_method == 'mean':
norm = t.groupby([t.index.month, t.index.day, t.index.hour]).mean().sort_index()
elif norm_method == 'median':
norm = t.groupby([t.index.month, t.index.day, t.index.hour]).median().sort_index()
else:
assert False, "Error: Unknown norm_method '{}'".format(norm_method)
# now replicate and trim to the desired output range
start = pd.to_datetime(start)
end = pd.to_datetime(end)
# need a non-leapyear and leapyear version
norm_ly = norm.copy()
if norm.shape[0] == 366*hrs:
norm = norm.drop((2,29,))
else: # norm doesn't include any leapyear data
assert norm.shape[0] == 365*hrs
# make Feb 29 the mean of Feb 28 and Mar 1
foo = (norm.loc[(2,28,)] + norm.loc[(3,1,)]) / 2.0
foo.index = pd.MultiIndex.from_product( ([2],[29],list(range(hrs))) )
norm_ly = pd.concat((norm_ly,foo)).sort_index()
norm_ly.sort_index(inplace=True) # probably not needed
# build up a 'long normal' (lnorm) dataframe year by year by appending the norm or norm_ly
lnorm = None
for yr in np.arange(start.year, end.year+1):
#print(yr)
idx = pd.date_range(start='{}-{:02d}-{:02d} {:02d}:00:00'.format(yr,*norm.index[0]),
end= '{}-{:02d}-{:02d} {:02d}:00:00'.format(yr,*norm.index[-1]),
freq=hrs_freq)
if idx.shape[0] == 366*hrs:
foo = norm_ly.copy()
else:
assert norm.shape[0] == 365*hrs
foo = norm.copy()
foo.index = idx
if lnorm is None:
lnorm = foo
else:
lnorm = lnorm.append(foo)
return lnorm.loc[start:end]
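# The stray comment above compute_year_over_year_norm mentions a BM (single
# sine) degree-day routine whose body is not part of this excerpt. Below is a
# minimal sketch of the Baskerville-Emin single-sine calculation, with assumed
# names and a lower threshold only; it is an illustration, not the author's
# implementation. It uses the `np` imported at the top of the file.
def single_sine_degree_days(tmin, tmax, t_low):
    m = (tmax + tmin) / 2.0  # mean of the fitted daily sine
    a = (tmax - tmin) / 2.0  # amplitude of the fitted daily sine
    if tmin >= t_low:        # whole day above threshold
        return m - t_low
    if tmax <= t_low:        # whole day below threshold
        return 0.0
    theta = np.arcsin((t_low - m) / a)  # angle where the sine crosses t_low
    return ((m - t_low) * (np.pi / 2 - theta) + a * np.cos(theta)) / np.pi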
avg_line_length: 41.062802 | max_line_length: 132 | alphanum_fraction: 0.598
864003328f8b49eae739c102dea7da6313ecab13 | 2,584 | py | Python | applications/CSharpWrapperApplication/tests/test_CSharpWrapperApplication.py | lkusch/Kratos @ e8072d8e24ab6f312765185b19d439f01ab7b27b | licenses: ["BSD-4-Clause"] | stars: 778 (2017-01-27T16:29:17.000Z to 2022-03-30T03:01:51.000Z) | issues: 6,634 (2017-01-15T22:56:13.000Z to 2022-03-31T15:03:36.000Z) | forks: 224 (2017-02-07T14:12:49.000Z to 2022-03-06T23:09:34.000Z)
# import Kratos
import KratosMultiphysics
import KratosMultiphysics.StructuralMechanicsApplication as StructuralMechanicsApplication
import KratosMultiphysics.CSharpWrapperApplication as CSharpWrapperApplication
import run_cpp_unit_tests
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import subprocess
import subprocess
# Using kratos_utilities
import KratosMultiphysics.kratos_utilities as kratos_utilities
if kratos_utilities.CheckIfApplicationsAvailable("ExternalSolversApplication"):
has_external_solvers_application = True
else:
has_external_solvers_application = False
# Import the tests o test_classes to create the suits
## SMALL TESTS
## NIGTHLY TESTS
## VALIDATION TESTS
def AssembleTestSuites():
''' Populates the test suites to run.
At least, it should populate the suites:
"small", "nightly" and "all".
Return
------
suites: A dictionary of suites
The set of suites with its test_cases added.
'''
suites = KratosUnittest.KratosSuites
# Create a test suit with the selected tests (Small tests):
smallSuite = suites['small']
# Create a test suit with the selected tests plus all small tests
nightlySuite = suites['nightly']
### BEGIN SMALL SUITE ###
### END SMALL SUITE ###
### BEGIN NIGHTLY SUITE ###
### END VALIDATION SUITE ###
### BEGIN VALIDATION SUITE ###
# For very long tests that should not be in nighly and you can use to validate
validationSuite = suites['validation']
validationSuite.addTests(nightlySuite)
### END VALIDATION ###
# Create a test suit that contains all the tests:
allSuite = suites['all']
allSuite.addTests(nightlySuite) # Already contains the smallSuite
validationSuite.addTests(allSuite) # Validation contains all
# Manual list for debugging
#allSuite.addTests(
#KratosUnittest.TestLoader().loadTestsFromTestCases([
#### STANDALONE
#### SMALL
#### NIGTHLY
#### VALIDATION
#])
#)
return suites
if __name__ == '__main__':
KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning cpp unit tests ...")
run_cpp_unit_tests.run()
KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished running cpp unit tests!")
KratosMultiphysics.Logger.PrintInfo("Unittests", "\nRunning python tests ...")
KratosUnittest.runTests(AssembleTestSuites())
KratosMultiphysics.Logger.PrintInfo("Unittests", "Finished python tests!")
avg_line_length: 29.363636 | max_line_length: 90 | alphanum_fraction: 0.71904
8640b1d90a7dd7eb0d598131b239f974e433d2eb | 1,121 | py | Python | backend/api/models.py | mezidia/mezidia-airlines-backend @ bc0b27b785f45ac83552f7fbb879cd977171c2fc | licenses: ["MIT"] | stars: null | issues: null | forks: null
from sqlalchemy import Column, Integer, String, ForeignKey, Float
from sqlalchemy.orm import relationship
from .database import Base
avg_line_length: 29.5 | max_line_length: 70 | alphanum_fraction: 0.716325
86421594cd7a65136f722a4e9889059ae90e1d77 | 2,145 | py | Python | run.py | Ganeshrockz/Flask-Python-Dev @ 522b280484e8f4cf3877b378a1334c501ffbc41e | licenses: ["Apache-2.0"] | stars: null | issues: null | forks: null
from flask import Flask, flash, render_template, redirect, url_for
from flask_pymongo import PyMongo  # 'flask.ext.pymongo' was removed in Flask 1.0
from flask import request
app=Flask(__name__)
app.config['MONGO_DBNAME']='stud'
app.config['MONGO_URI']='mongodb://localhost:27017/stud'
mongo=PyMongo(app)
"""
@app.route('/add')
def add():
user=mongo.db.users
user.insert({"name":"Ganesh","age":19})
return "Added"
@app.route('/find')
def find():
user=mongo.db.users
data=user.find_one({"name":"Ganesh"})
return data["name"]
"""
if __name__ == '__main__':
app.secret_key = 'ganeshrockz'
app.run(debug=True)
avg_line_length: 35.75 | max_line_length: 213 | alphanum_fraction: 0.620513
86448f12322f6a8ff13f239dbc2163cdebce1c56 | 12,198 | py | Python | resources/tests/conftest.py | jussiarpalahti/respa @ c308bcb96e56d9401e22df94d3073e248618e243 | licenses: ["MIT"] | stars: null | issues: null | forks: null
# -*- coding: utf-8 -*-
import pytest
import datetime
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework.test import APIClient, APIRequestFactory
from resources.enums import UnitAuthorizationLevel
from resources.models import Resource, ResourceType, Unit, Purpose, Day, Period
from resources.models import Equipment, EquipmentAlias, ResourceEquipment, EquipmentCategory, TermsOfUse, ResourceGroup
from resources.models import AccessibilityValue, AccessibilityViewpoint, ResourceAccessibility, UnitAccessibility
from munigeo.models import Municipality
avg_line_length: 29.606796 | max_line_length: 119 | alphanum_fraction: 0.702492
86468125b6e8c3a2e71c1dfdfd2e29f1c5b2af19 | 586 | py | Python | qcmetadataprinter/struct.py | x2dev/device_leeco_x2 @ 9bf4549b5f64390ca4da291745b2a66a8e3f006e | licenses: ["FTL"] | stars: null | issues: null | forks: null
#!/bin/python3
with open('../camera/QCamera2/stack/common/cam_intf.h', 'r') as f:
data = f.read()
f.closed
start = data.find(' INCLUDE(CAM_INTF_META_HISTOGRAM')
end = data.find('} metadata_data_t;')
data = data[start:end]
metadata = data.split("\n")
metalist = list()
for line in metadata:
if (line.startswith(' INCLUDE')):
foo = line.split(',')
foo[0] = foo[0].replace('INCLUDE', 'PRINT')
metalist.append(foo[0] + ", pMetadata);")
with open('list.txt', 'w') as f:
for item in metalist:
f.write("%s\n" % item)
f.closed
avg_line_length: 23.44 | max_line_length: 66 | alphanum_fraction: 0.593857
8647521d4f7b0429f689d687206113be1ffbd603 | 317 | py | Python | abc/abc121/abc121d-2.py | c-yan/atcoder @ 940e49d576e6a2d734288fadaf368e486480a948 | licenses: ["MIT"] | stars: 1 (2019-08-21T00:49:34.000Z to 2019-08-21T00:49:34.000Z) | issues: null | forks: null
A, B = map(int, input().split())
print(f(A, B))
| 16.684211 | 59 | 0.381703 |
86479efec94998d8ac597979216e69bc35252174 | 807 | py | Python | log_mysql.py | kizunai/Weather-Scrapy | d2104d28dc303f6710b043f9821dcb84c665665d | [
"Apache-2.0"
] | null | null | null | log_mysql.py | kizunai/Weather-Scrapy | d2104d28dc303f6710b043f9821dcb84c665665d | [
"Apache-2.0"
] | null | null | null | log_mysql.py | kizunai/Weather-Scrapy | d2104d28dc303f6710b043f9821dcb84c665665d | [
"Apache-2.0"
] | null | null | null | import logging
from logging.handlers import TimedRotatingFileHandler
'''
logger = MyLog("test","log\\text.txt")
logger.logger.debug('debug message')
logger.logger.info('info message')
logger.logger.warning('warn message')
logger.logger.error('error message')
logger.logger.critical('critical message')
'''
| 31.038462 | 97 | 0.675341 |
8647faa20530aa0d730c1a40c079c5454d72f20d | 1,252 | py | Python | src/fiesta/urls.py | lerooze/django-fiesta | d521f50bcdd3d40e91f0474ec2fa7e256758e0a5 | [
"BSD-3-Clause"
] | null | null | null | src/fiesta/urls.py | lerooze/django-fiesta | d521f50bcdd3d40e91f0474ec2fa7e256758e0a5 | [
"BSD-3-Clause"
] | 3 | 2019-10-29T23:31:01.000Z | 2020-03-31T03:08:28.000Z | src/fiesta/urls.py | lerooze/django-fiesta | d521f50bcdd3d40e91f0474ec2fa7e256758e0a5 | [
"BSD-3-Clause"
] | null | null | null | # urls.py
from django.urls import path, register_converter
from fiesta import converters
from fiesta.views import views
from rest_framework.urlpatterns import format_suffix_patterns
# "http://django-sdmx.org/wsrest/"
# "http://django-sdmx.org/ws/"
register_converter(converters.ResourceConverter, 'res')
register_converter(converters.AgencyConverter, 'age')
register_converter(converters.ContextConverter, 'con')
urlpatterns = [
path('wsreg/SubmitStructure/', views.SubmitStructureRequestView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/schema/<con:context>/<age:agencyID>/<str:resourceID>/<str:version>', views.SDMXRESTfulSchemaView.as_view()),
path('wsrest/<res:resource>/', views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/',
views.SDMXRESTfulStructureView.as_view()),
path('wsrest/<res:resource>/<age:agencyID>/<str:resourceID>/'
'<str:version>/',
views.SDMXRESTfulStructureView.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| 40.387097 | 125 | 0.742812 |
8648090bbe37bd69072d860284239d2be5f5a913 | 469 | py | Python | code-wars/010.moving-zeros-to-the-end.py | code-knayam/DataStructureAlgorithms | 8425911633d4d343c58798a123175289ed0df1fe | [
"MIT"
] | null | null | null | code-wars/010.moving-zeros-to-the-end.py | code-knayam/DataStructureAlgorithms | 8425911633d4d343c58798a123175289ed0df1fe | [
"MIT"
] | null | null | null | code-wars/010.moving-zeros-to-the-end.py | code-knayam/DataStructureAlgorithms | 8425911633d4d343c58798a123175289ed0df1fe | [
"MIT"
] | null | null | null | # Write an algorithm that takes an array and moves all of the zeros to the end, preserving the order of the other elements. | 31.266667 | 123 | 0.603412 |
864a44b2fa4b1d6dbb15ace15ef151c81922788f | 928 | py | Python | __main__.py | miezebieze/scott-launcher | a03597d0883af075128d1ea4ea53e7b5132807b1 | [
"MIT"
] | 1 | 2020-06-12T20:49:47.000Z | 2020-06-12T20:49:47.000Z | __main__.py | miezebieze/scott-launcher | a03597d0883af075128d1ea4ea53e7b5132807b1 | [
"MIT"
] | null | null | null | __main__.py | miezebieze/scott-launcher | a03597d0883af075128d1ea4ea53e7b5132807b1 | [
"MIT"
] | null | null | null | from enum import Enum
from window import Window
D = Enum ('Directions','N NE E SE S SW W NW')
selector_map = {
D.NW: [0.5,0.5], D.N: [1.5,0], D.NE: [2.5,0.5],
D.W: [0,1.5], D.E: [3,1.5],
D.SW: [0.5,2.5], D.S: [1.5,3], D.SE: [2.5,2.5],
}
selector_size = 100
window_size = selector_size*4
window = Window (window_size,window_size,selector_map,selector_size,selector_size)
# set actions here
from functools import partial
window.actions[D.NW] = partial (say,'northwast')
window.actions[D.N] = partial (say,'north')
window.actions[D.NE] = partial (say,'neorthest')
window.actions[D.W] = partial (say,'western')
window.actions[D.E] = partial (say,'easy')
window.actions[D.SW] = partial (say,'suess whest')
window.actions[D.S] = partial (say,'sissy')
window.actions[D.SE] = partial (say,'seoul')
window.go ()
| 29 | 82 | 0.626078 |
864a6447cb894e438d5dd8c26760c86abeb04746 | 760 | py | Python | cride/circles/serializers.py | monteals/C-Ride | 6e9368011f49ff619d1edaeaf1e8232685cc2095 | [
"MIT"
] | null | null | null | cride/circles/serializers.py | monteals/C-Ride | 6e9368011f49ff619d1edaeaf1e8232685cc2095 | [
"MIT"
] | 9 | 2020-04-24T01:29:38.000Z | 2022-03-12T00:25:50.000Z | cride/circles/serializers.py | monteals/C-Ride | 6e9368011f49ff619d1edaeaf1e8232685cc2095 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from cride.circles.models import Circle
| 36.190476 | 113 | 0.777632 |
864a8f3a33d90a7cbe60473e931b29c2b862bbbb | 13,719 | py | Python | contact/views.py | Dimstella/blockchain-contact-tracing-app-hospitals | e0b2bf2b3b8c06e58032faed99900d1c7b7d300d | [
"MIT"
] | null | null | null | contact/views.py | Dimstella/blockchain-contact-tracing-app-hospitals | e0b2bf2b3b8c06e58032faed99900d1c7b7d300d | [
"MIT"
] | null | null | null | contact/views.py | Dimstella/blockchain-contact-tracing-app-hospitals | e0b2bf2b3b8c06e58032faed99900d1c7b7d300d | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.template import loader
from django.http import HttpResponse
from django.shortcuts import render, redirect
from .models import Patient
from django.contrib import messages
import pandas as pd
from django.contrib.auth.decorators import login_required
from web3 import Web3
import datetime
import hashlib
import json
def users(request):
dt = pd.read_csv('countries.txt', sep='\n')
countries = []
df = dt.to_dict()
for k, country in df.items():
for k,v in country.items():
countries.append(v)
return render(request,'users.html', {'countries':countries})
def search_results(request):
dt = pd.read_csv('countries.txt', sep='\n')
countries = []
df = dt.to_dict()
for k, country in df.items():
for k,v in country.items():
countries.append(v)
ganache_url = 'http://127.0.0.1:7545'
web3 = Web3(Web3.HTTPProvider(ganache_url))
abi = json.loads('[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"inputs":[{"internalType":"enum Contact_tracing.Statuses","name":"_status","type":"uint8"},{"internalType":"string","name":"_postal","type":"string"},{"internalType":"string","name":"_hospitalName","type":"string"},{"internalType":"string","name":"_hashing","type":"string"},{"internalType":"string","name":"_country","type":"string"}],"name":"addPatient","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"getPatientsCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"index","type":"uint256"}],"name":"gettPatient","outputs":[{"components":[{"internalType":"enum Contact_tracing.Statuses","name":"status","type":"uint8"},{"internalType":"string","name":"postal","type":"string"},{"internalType":"string","name":"hospitalName","type":"string"},{"internalType":"string","name":"hashing","type":"string"},{"internalType":"string","name":"country","type":"string"}],"internalType":"struct Contact_tracing.Patient","name":"","type":"tuple"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"patients","outputs":[{"internalType":"enum Contact_tracing.Statuses","name":"status","type":"uint8"},{"internalType":"string","name":"postal","type":"string"},{"internalType":"string","name":"hospitalName","type":"string"},{"internalType":"string","name":"hashing","type":"string"},{"internalType":"string","name":"country","type":"string"}],"stateMutability":"view","type":"function"}]')
address = "0xa84580e93474b942b48B16CAEeaA1920962CBd90"
contract = web3.eth.contract(address = address, abi = abi)
no_patients = contract.functions.getPatientsCount().call()
lpatient = []
lhash = []
message = 'No infected people'
if request.method == 'GET':
region = request.GET.get("city")
postal = request.GET.get("postal")
country = request.GET.get("country")
counter = 0
for i in range(0, no_patients):
patients = contract.functions.gettPatient(i).call()
lpatient = list(patients)
print(lpatient)
if lpatient[0] == 0 and lpatient[1] == postal and lpatient[4] == country:
if lpatient[3] not in lhash and lpatient[2] != 'daleted':
lhash.append(lpatient[3])
counter = counter + 1
message = 'Infected people in your area are'
return render(request,'infected.html', {'countries':countries, 'infected_people': counter, 'message': message})
| 56.45679 | 1,732 | 0.648954 |
864be7bfbe0cf81e1b82f46620ffbb10dcc091db | 108 | py | Python | figures/collide1a.py | brandon-rhodes/pycon2010-mighty-dictionary | 1f75fdd42cd243c9f86a87f7b48f6b3498d032e8 | [
"MIT"
] | 22 | 2015-05-10T10:32:47.000Z | 2021-08-21T17:29:09.000Z | figures/collide1a.py | brandon-rhodes/pycon2010-mighty-dictionary | 1f75fdd42cd243c9f86a87f7b48f6b3498d032e8 | [
"MIT"
] | 2 | 2016-05-06T20:09:17.000Z | 2020-01-23T02:29:18.000Z | figures/collide1a.py | brandon-rhodes/pycon2010-mighty-dictionary | 1f75fdd42cd243c9f86a87f7b48f6b3498d032e8 | [
"MIT"
] | 4 | 2016-04-17T23:38:11.000Z | 2019-03-06T02:28:48.000Z | import _dictdraw, sys
d = {}
surface = _dictdraw.draw_dictionary(d, [4])
surface.write_to_png(sys.argv[1])
| 18 | 43 | 0.731481 |
864bf69490c6ee45920463f0c6f8b0b6dbff18dc | 4,624 | py | Python | ReportBot.py | SeveNNoff/InstagramReportBot | 0a613b5f2733d988a952d64d8141cb7390527b9e | [
"Apache-2.0"
] | 1 | 2020-10-13T16:04:08.000Z | 2020-10-13T16:04:08.000Z | ReportBot.py | SeveNNoff/InstagramReportBot | 0a613b5f2733d988a952d64d8141cb7390527b9e | [
"Apache-2.0"
] | null | null | null | ReportBot.py | SeveNNoff/InstagramReportBot | 0a613b5f2733d988a952d64d8141cb7390527b9e | [
"Apache-2.0"
] | 1 | 2021-04-17T04:42:29.000Z | 2021-04-17T04:42:29.000Z | # coding=utf-8
#!/usr/bin/env python3
from libs.check_modules import check_modules
from sys import exit
from os import _exit
check_modules()
from os import path
from libs.logo import print_logo
from libs.utils import print_success
from libs.utils import print_error
from libs.utils import ask_question
from libs.utils import print_status
from libs.utils import parse_proxy_file
from libs.proxy_harvester import find_proxies
from libs.attack import report_profile_attack
from libs.attack import report_video_attack
from multiprocessing import Process
from colorama import Fore, Back, Style
if __name__ == "__main__":
print_logo()
try:
main()
print(Style.RESET_ALL)
except KeyboardInterrupt:
print("\n\n" + Fore.RED + "[*] Program is closing!")
print(Style.RESET_ALL)
_exit(0) | 30.421053 | 96 | 0.599048 |
864c964912d3ec24af5b6c8c081c0833e7bd9b90 | 9,845 | py | Python | openfermioncirq/variational/ansatzes/default_initial_params_test.py | viathor/OpenFermion-Cirq | b4b7f8d82c40f0a6282873b5d2867e9d8778cea6 | [
"Apache-2.0"
] | null | null | null | openfermioncirq/variational/ansatzes/default_initial_params_test.py | viathor/OpenFermion-Cirq | b4b7f8d82c40f0a6282873b5d2867e9d8778cea6 | [
"Apache-2.0"
] | null | null | null | openfermioncirq/variational/ansatzes/default_initial_params_test.py | viathor/OpenFermion-Cirq | b4b7f8d82c40f0a6282873b5d2867e9d8778cea6 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import pytest
import cirq
import openfermion
from openfermioncirq import (
HamiltonianObjective,
LowRankTrotterAnsatz,
SplitOperatorTrotterAnsatz,
SwapNetworkTrotterAnsatz,
SwapNetworkTrotterHubbardAnsatz,
VariationalStudy,
prepare_gaussian_state,
simulate_trotter)
from openfermioncirq.trotter import (
LINEAR_SWAP_NETWORK, LOW_RANK, LowRankTrotterAlgorithm, SPLIT_OPERATOR)
# 4-qubit random DiagonalCoulombHamiltonian
diag_coul_hamiltonian = openfermion.random_diagonal_coulomb_hamiltonian(
4, real=True, seed=47141)
# 4-qubit H2 2-2 with bond length 0.7414
bond_length = 0.7414
geometry = [('H', (0., 0., 0.)), ('H', (0., 0., bond_length))]
h2_hamiltonian = openfermion.load_molecular_hamiltonian(
geometry, 'sto-3g', 1, format(bond_length), 2, 2)
# 4-qubit LiH 2-2 with bond length 1.45
bond_length = 1.45
geometry = [('Li', (0., 0., 0.)), ('H', (0., 0., bond_length))]
lih_hamiltonian = openfermion.load_molecular_hamiltonian(
geometry, 'sto-3g', 1, format(bond_length), 2, 2)
| 40.514403 | 80 | 0.685729 |
864d054eec7d0aab41c1311c42de1bf952355469 | 33,765 | py | Python | spyder/plugins/variableexplorer/widgets/arrayeditor.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | [
"MIT"
] | null | null | null | spyder/plugins/variableexplorer/widgets/arrayeditor.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | [
"MIT"
] | null | null | null | spyder/plugins/variableexplorer/widgets/arrayeditor.py | seryj/spyder | acea4f501c1a04d57b02e5e817708a69b503f430 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright Spyder Project Contributors
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
"""
NumPy Array Editor Dialog based on Qt
"""
# pylint: disable=C0103
# pylint: disable=R0903
# pylint: disable=R0911
# pylint: disable=R0201
# Standard library imports
from __future__ import print_function
# Third party imports
from qtpy.compat import from_qvariant, to_qvariant
from qtpy.QtCore import (QAbstractTableModel, QItemSelection, QLocale,
QItemSelectionRange, QModelIndex, Qt, Slot)
from qtpy.QtGui import QColor, QCursor, QDoubleValidator, QKeySequence
from qtpy.QtWidgets import (QAbstractItemDelegate, QApplication, QCheckBox,
QComboBox, QDialog, QDialogButtonBox, QGridLayout,
QHBoxLayout, QInputDialog, QItemDelegate, QLabel,
QLineEdit, QMenu, QMessageBox, QPushButton,
QSpinBox, QStackedWidget, QTableView, QVBoxLayout,
QWidget)
import numpy as np
# Local imports
from spyder.config.base import _
from spyder.config.fonts import DEFAULT_SMALL_DELTA
from spyder.config.gui import get_font, config_shortcut
from spyder.py3compat import (io, is_binary_string, is_string,
is_text_string, PY3, to_binary_string,
to_text_string)
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import add_actions, create_action, keybinding
# Note: string and unicode data types will be formatted with '%s' (see below)
SUPPORTED_FORMATS = {
'single': '%.6g',
'double': '%.6g',
'float_': '%.6g',
'longfloat': '%.6g',
'float16': '%.6g',
'float32': '%.6g',
'float64': '%.6g',
'float96': '%.6g',
'float128': '%.6g',
'csingle': '%r',
'complex_': '%r',
'clongfloat': '%r',
'complex64': '%r',
'complex128': '%r',
'complex192': '%r',
'complex256': '%r',
'byte': '%d',
'bytes8': '%s',
'short': '%d',
'intc': '%d',
'int_': '%d',
'longlong': '%d',
'intp': '%d',
'int8': '%d',
'int16': '%d',
'int32': '%d',
'int64': '%d',
'ubyte': '%d',
'ushort': '%d',
'uintc': '%d',
'uint': '%d',
'ulonglong': '%d',
'uintp': '%d',
'uint8': '%d',
'uint16': '%d',
'uint32': '%d',
'uint64': '%d',
'bool_': '%r',
'bool8': '%r',
'bool': '%r',
}
LARGE_SIZE = 5e5
LARGE_NROWS = 1e5
LARGE_COLS = 60
#==============================================================================
# Utility functions
#==============================================================================
def is_float(dtype):
"""Return True if datatype dtype is a float kind"""
return ('float' in dtype.name) or dtype.name in ['single', 'double']
def is_number(dtype):
"""Return True is datatype dtype is a number kind"""
return is_float(dtype) or ('int' in dtype.name) or ('long' in dtype.name) \
or ('short' in dtype.name)
def get_idx_rect(index_list):
"""Extract the boundaries from a list of indexes"""
rows, cols = list(zip(*[(i.row(), i.column()) for i in index_list]))
return ( min(rows), max(rows), min(cols), max(cols) )
#==============================================================================
# Main classes
#==============================================================================
#TODO: Implement "Paste" (from clipboard) feature
avg_line_length: 39.817217 | max_line_length: 84 | alphanum_fraction: 0.53206
864d134a9c98ae3913986fb31b160d825e4250a2 | 4,638 | py | Python | libbeat/tests/system/idxmgmt.py | dddpaul/beats @ 0d4a830fea46210ee264c52a977834d39493c750 | licenses: ["ECL-2.0", "Apache-2.0"] | stars: 4 (2020-11-17T06:29:30.000Z to 2021-08-08T11:56:01.000Z) | issues: 6 (2020-06-23T16:28:27.000Z to 2020-10-05T17:52:01.000Z) | forks: 2 (2020-10-26T15:34:06.000Z to 2021-12-10T08:51:58.000Z)
import datetime
import unittest
import pytest
from elasticsearch import NotFoundError
avg_line_length: 39.305085 | max_line_length: 99 | alphanum_fraction: 0.618586
864d2029d6faf1f316dacf9ce08c2da5fefb2cbf | 9,735 | py | Python | scripts/policy/sdn_single_vm_multiple_policy_topology.py | atsgen/tf-test @ 2748fcd81491450c75dadc71849d2a1c11061029 | licenses: ["Apache-2.0"] | stars: 5 (2020-09-29T00:36:57.000Z to 2022-02-16T06:51:32.000Z) | issues: 27 (2019-11-02T02:18:34.000Z to 2022-02-24T18:49:08.000Z) | forks: 20 (2019-11-28T16:02:25.000Z to 2022-01-06T05:56:58.000Z)
'''*******AUTO-GENERATED TOPOLOGY*********'''
from __future__ import print_function
from builtins import range
from builtins import object
from tcutils.util import get_random_name,get_random_cidr
if __name__ == '__main__':
print("Currently topology limited to one domain/project..")
print("Based on need, can be extended to cover config for multiple domain/projects")
print()
my_topo = sdn_single_vm_multiple_policy_config(
domain='default-domain', project='admin')
x = my_topo.__dict__
# print "keys only:"
# for key, value in x.iteritems(): print key
# print
# print "keys & values:"
# for key, value in x.iteritems(): print key, "-->", value
import topo_helper
topo_h = topo_helper.topology_helper(my_topo)
#vmc_list= topo_h.get_vmc_list()
policy_vn = topo_h.get_policy_vn()
#
avg_line_length: 120.185185 | max_line_length: 754 | alphanum_fraction: 0.638007
864d964c990a587e44dea52d446ea4e2f4b1a45e | 6,340 | py | Python | chaco/polygon_plot.py | burnpanck/chaco @ 6457cdd28625991ba69fbbee105051cab237aa51 | licenses: ["BSD-3-Clause"] | stars: 3 (2017-09-17T17:32:06.000Z to 2022-03-15T13:04:43.000Z) | issues: null | forks: 5 (2015-05-17T16:08:11.000Z to 2021-02-23T09:23:42.000Z)
""" Defines the PolygonPlot class.
"""
from __future__ import with_statement
# Major library imports
import numpy as np
# Enthought library imports.
from enable.api import LineStyle, black_color_trait, \
transparent_color_trait
from kiva.agg import points_in_polygon
from traits.api import Enum, Float, Tuple, Property, cached_property, \
on_trait_change
# Local imports.
from base_xy_plot import BaseXYPlot
avg_line_length: 36.647399 | max_line_length: 82 | alphanum_fraction: 0.603312
86502380f0447c4c5893fb4c09f732239b1cc11f | 552 | py | Python | webapp/template_config.py | evgenyss/investing @ b72da8587a4783bfdd389f1781dcd108d1a5e53f | licenses: ["MIT"] | stars: null | issues: null | forks: null
import os
from datetime import timedelta
basedir = os.path.abspath(os.path.dirname(__file__))
API_DATA_URL = "https://invest-public-api.tinkoff.ru/rest/tinkoff.public.invest.api.contract.v1.InstrumentsService/"
API_LASTPRICES_URL = "https://invest-public-api.tinkoff.ru/rest/\
tinkoff.public.invest.api.contract.v1.MarketDataService/GetLastPrices"
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, '..', 'webapp.db')
REMEMBER_COOKIE_DURATION = timedelta(days=1)
SQLALCHEMY_TRACK_MODIFICATIONS = False
SECRET_KEY = ""
API_TOKEN = ""
avg_line_length: 29.052632 | max_line_length: 116 | alphanum_fraction: 0.778986
8650d9e6c008eb69e8a60ee61bf0c6b0618f2c83 | 3,842 | py | Python | humann2/quantify/families.py | dytk2134/humann2 @ 9b8f212bdd910ee7187f06f1550f0c86bce0473b | licenses: ["MIT"] | stars: null | issues: null | forks: null
"""
HUMAnN2: quantify_families module
Compute alignments by gene family
Copyright (c) 2014 Harvard School of Public Health
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import logging
import math
from .. import config
from .. import utilities
from .. import store
# name global logging instance
logger=logging.getLogger(__name__)
def gene_families(alignments,gene_scores,unaligned_reads_count):
"""
Compute the gene families from the alignments
"""
logger.debug("Compute gene families")
# Compute scores for each gene family for each bug set
alignments.convert_alignments_to_gene_scores(gene_scores)
# Process the gene id to names mappings
gene_names=store.Names(config.gene_family_name_mapping_file)
delimiter=config.output_file_column_delimiter
category_delimiter=config.output_file_category_delimiter
# Write the scores ordered with the top first
column_name=config.file_basename+"_Abundance-RPKs"
if config.remove_column_description_output:
column_name=config.file_basename
tsv_output=["# Gene Family"+delimiter+column_name]
# Add the unaligned reads count
tsv_output.append(config.unmapped_gene_name+delimiter+utilities.format_float_to_string(unaligned_reads_count))
# Print out the gene families with those with the highest scores first
for gene in gene_scores.gene_list_sorted_by_score("all"):
all_score=gene_scores.get_score("all",gene)
if all_score>0:
gene_name=gene_names.get_name(gene)
# Print the computation of all bugs for gene family
tsv_output.append(gene_name+delimiter+utilities.format_float_to_string(all_score))
# Process and print per bug if selected
if not config.remove_stratified_output:
# Print scores per bug for family ordered with those with the highest values first
scores_by_bug=gene_scores.get_scores_for_gene_by_bug(gene)
for bug in utilities.double_sort(scores_by_bug):
if scores_by_bug[bug]>0:
tsv_output.append(gene_name+category_delimiter+bug+delimiter
+utilities.format_float_to_string(scores_by_bug[bug]))
if config.output_format=="biom":
# Open a temp file if a conversion to biom is selected
tmpfile=utilities.unnamed_temp_file()
file_handle=open(tmpfile,'w')
file_handle.write("\n".join(tsv_output))
file_handle.close()
utilities.tsv_to_biom(tmpfile,config.genefamilies_file,"Gene")
else:
# Write output as tsv format
file_handle = open(config.genefamilies_file, "w")
file_handle.write("\n".join(tsv_output))
file_handle.close()
return config.genefamilies_file
avg_line_length: 40.442105 | max_line_length: 116 | alphanum_fraction: 0.728267
865144cd196eb39a73555fc643c117d083a615cc | 744 | py | Python | Buta Nicolae/threads.py | RazvanBalau/parallel-2020 @ bd9c0dea6cc70e167320f64632d7a235522dfdb3 | licenses: ["MIT"] | stars: null | issues: null | forks: 23 (2020-01-15T15:02:39.000Z to 2020-01-15T17:23:03.000Z)
import threading
from multiprocessing import Queue
results = []
results2 = []
q = Queue()
t2 = threading.Thread(target=add_num, args=(q, ))
t1 = threading.Thread(target=take_numbers, args=(q, ))
t2.start()
t1.start()
t2.join()
t1.join()
q.close()
for result in results:
print ("adunare =", result)
for result in results2:
print ("scadere =", result) | 20.666667 | 54 | 0.606183 |
86517e62e82db5794921e6da0e41993351344585 | 576 | py | Python | code_week11_76_712/unique_paths.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week11_76_712/unique_paths.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | code_week11_76_712/unique_paths.py | dylanlee101/leetcode | b059afdadb83d504e62afd1227107de0b59557af | [
"Apache-2.0"
] | null | null | null | '''
A robot is located at the top-left corner of an m x n grid (marked "Start").
The robot can only move either down or right at any point in time.
It is trying to reach the bottom-right corner of the grid (marked "Finish").
How many possible unique paths are there?
(The original illustration showed a 7 x 3 grid.)
Example 1:
Input: m = 3, n = 2
Output: 3
Explanation: Starting from the top-left corner, there are 3 ways to reach the bottom-right corner:
1. Right -> Right -> Down
2. Right -> Down -> Right
3. Down -> Right -> Right
Example 2:
Input: m = 7, n = 3
Output: 28
Source: LeetCode
https://leetcode-cn.com/problems/unique-paths
'''
avg_line_length: 15.567568 | max_line_length: 49 | alphanum_fraction: 0.552083
86524c143ea8ba1817f21135f1c0c27360fa74e8 | 3,361 | py | Python | spektral/datasets/qm9.py | stars: 2,145 (2019-01-21T20:49:44.000Z to 2022-03-28T20:27:27.000Z) in JonaBecher/spektral @ ff59e16d959e0ec698428997363be20462625699 ["MIT"] | issues: 259 (2019-01-22T05:18:19.000Z to 2022-03-25T10:46:10.000Z) in jasper-park/spektral @ ad2d96549c00f68ce992a7d29e2c3fd025fb529b ["MIT"] | forks: 322 (2019-02-11T16:18:27.000Z to 2022-03-24T16:26:59.000Z) in jasper-park/spektral @ ad2d96549c00f68ce992a7d29e2c3fd025fb529b ["MIT"]
import os
import os.path as osp
import numpy as np
from joblib import Parallel, delayed
from tensorflow.keras.utils import get_file
from tqdm import tqdm
from spektral.data import Dataset, Graph
from spektral.utils import label_to_one_hot, sparse
from spektral.utils.io import load_csv, load_sdf
ATOM_TYPES = [1, 6, 7, 8, 9]
BOND_TYPES = [1, 2, 3, 4]
| 29.482456 | 80 | 0.621839 |
865416b109055549efa6918ca6073abc6d07a490 | 602 | py | Python | code/Level 1 - Intro to CPX/5-acceleration/main.py | tscofield/cpx-training | 682a2cef6bb164bc7c374744de94c21581258392 | [
"MIT"
] | null | null | null | code/Level 1 - Intro to CPX/5-acceleration/main.py | tscofield/cpx-training | 682a2cef6bb164bc7c374744de94c21581258392 | [
"MIT"
] | null | null | null | code/Level 1 - Intro to CPX/5-acceleration/main.py | tscofield/cpx-training | 682a2cef6bb164bc7c374744de94c21581258392 | [
"MIT"
] | 1 | 2019-02-07T04:04:05.000Z | 2019-02-07T04:04:05.000Z | from adafruit_circuitplayground.express import cpx
# Main loop gets x, y and z axis acceleration, prints the values, and turns on
# red, green and blue, at levels related to the x, y and z values.
while True:
if cpx.switch:
print("Slide switch off!")
cpx.pixels.fill((0, 0, 0))
continue
else:
R = 0
G = 0
B = 0
x, y, z = cpx.acceleration
print((x, y, z))
if x:
R = R + abs(int(x))
if y:
G = G + abs(int(y))
if z:
B = B + abs(int(z))
cpx.pixels.fill((R, G, B))
| 25.083333 | 78 | 0.503322 |
86543345af40c82152fa05b0f713964bb091299c | 7,692 | py | Python | src/data_preprocess.py | QinganZhao/ML-based-driving-motion-prediction | 5a7772cf199d30e4e33bbe943775c2e19aac5d5b | [
"MIT"
] | 18 | 2019-01-08T02:53:56.000Z | 2022-03-03T11:34:20.000Z | src/data_preprocess.py | QinganZhao/ML-based-driving-motion-prediction | 5a7772cf199d30e4e33bbe943775c2e19aac5d5b | [
"MIT"
] | null | null | null | src/data_preprocess.py | QinganZhao/ML-based-driving-motion-prediction | 5a7772cf199d30e4e33bbe943775c2e19aac5d5b | [
"MIT"
] | 7 | 2018-06-13T20:12:25.000Z | 2022-02-20T08:39:07.000Z | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.patches as patches
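# NOTE: load_data() is called below but its definition was not preserved in
# this copy of the file. This is a hedged stand-in: it assumes each per-car
# text file holds a numeric matrix with columns (frame, longitudinal
# position, lateral position, yaw); the car_id argument is kept only to
# match the original call signature.
def load_data(file_name, car_id):
    return np.loadtxt('./' + file_name)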
def get_low_freq_data(data):
"""
    Return a data matrix downsampled to 0.1 s per time step (from 0.01 s data).
"""
matrix = np.zeros((1, data.shape[1]))
for i in range(data.shape[0]):
if i % 10 == 0:
matrix = np.concatenate((matrix, data[i,:].reshape(1,data.shape[1])),axis=0)
return matrix[1:,:]
def data_process():
"""
This function serves to concatenate the information of two cars into one array.
Note: car1 -- mainlane car;
car2 -- merging car;
OutFormat:
0 case_ID
1 frame_ID
2 car1_long_pos
3 car1_long_vel
4 car1_lateral_pos
5 car1_lateral_displacement
6 car2_long_pos
7 car2_long_vel
8 car2_lateral_pos
9 car2_lateral_displacement
10 relative_long_vel (merge - mainlane)
11 relative_lateral_distance (merge - mainlane)
12 relative_long_distance (merge - mainlane)
13 car1_yaw
14 car2_yaw
15 situation label: (0: car1 yields car2; 1: car2 yields car1)
"""
data_matrix = np.zeros((1,16))
for i in range(128):
file_name_1 = 'data_'+str(i)+'_1.txt'
file_name_2 = 'data_'+str(i)+'_2.txt'
car1 = get_low_freq_data(load_data(file_name_1, 1))
car2 = get_low_freq_data(load_data(file_name_2, 2))
T = int(car1.shape[0])
#print(T)
current_data_matrix = np.zeros((T,16))
for j in range(1, T):
current_data_matrix[j,0] = i
current_data_matrix[j,1] = j
current_data_matrix[j,2] = car1[j,1]
current_data_matrix[j,3] = 10 * (car1[j,1] - car1[j-1,1])
current_data_matrix[j,4] = car1[j,2]
current_data_matrix[j,5] = car1[j,2] - car1[j-1,2]
current_data_matrix[j,6] = car2[j,1]
current_data_matrix[j,7] = 10 * (car2[j,1] - car2[j-1,1])
current_data_matrix[j,8] = car2[j,2]
current_data_matrix[j,9] = car2[j,2] - car2[j-1,2]
current_data_matrix[j,10] = current_data_matrix[j,7] - current_data_matrix[j,3]
current_data_matrix[j,11] = current_data_matrix[j,8] - current_data_matrix[j,4]
current_data_matrix[j,12] = current_data_matrix[j,6] - current_data_matrix[j,2]
current_data_matrix[j,13] = car1[j,3]
current_data_matrix[j,14] = car2[j,3]
if car1[-1,1] > car2[-1,1]:
current_data_matrix[j,15] = 1
else:
current_data_matrix[j,15] = 0
current_data_matrix = current_data_matrix[1:, :]
data_matrix = np.concatenate((data_matrix, current_data_matrix),axis=0)
np.savetxt('./data_matrix.txt', data_matrix[1:,:],'%.4f')
##################################################################
def divide_data(data_matrix, segment_length):
"""
This function serves to separate two situation cases.
"""
situation0_data = data_matrix[np.where(data_matrix[:,-1] == 0)]
situation1_data = data_matrix[np.where(data_matrix[:,-1] == 1)]
np.savetxt('./all_trajs_1.txt', situation0_data, '%.4f')
np.savetxt('./all_trajs_2.txt', situation1_data, '%.4f')
# count seq lengths
# separate sequence segments
# all_trajs_seg_1 = np.zeros((1, data_matrix.shape[1]))
# all_trajs_seg_2 = np.zeros((1, data_matrix.shape[1]))
all_trajs_1 = np.zeros((1, data_matrix.shape[1]))
all_trajs_2 = np.zeros((1, data_matrix.shape[1]))
count0, count1 = [], []
# for i in range(128):
# print('i = '+str(i))
# temp_data = data_matrix[np.where(data_matrix[:,0] == i)]
# if temp_data[0,-1] == 0:
# for j in range(temp_data.shape[0]-segment_length+1):
# temp_seg_data = temp_data[j:j+segment_length, :]
# count0.append(temp_seg_data.shape[0])
# all_trajs_seg_1 = np.concatenate((all_trajs_seg_1, temp_seg_data),axis=0)
# else:
# for j in range(temp_data.shape[0]-segment_length+1):
# temp_seg_data = temp_data[j:j+segment_length, :]
# count1.append(temp_seg_data.shape[0])
# all_trajs_seg_2 = np.concatenate((all_trajs_seg_2, temp_seg_data),axis=0)
for i in range(128):
print('i = '+str(i))
temp_data = data_matrix[np.where(data_matrix[:,0] == i)]
if temp_data[0,-1] == 0:
count0.append(temp_data.shape[0])
all_trajs_1 = np.concatenate((all_trajs_1, temp_data),axis=0)
elif temp_data[0,-1] == 1:
count1.append(temp_data.shape[0])
all_trajs_2 = np.concatenate((all_trajs_2, temp_data),axis=0)
print(all_trajs_1.shape)
print(all_trajs_2.shape)
print(sum(count0))
print(sum(count1))
# np.savetxt('./all_trajs_seg_1.txt', all_trajs_seg_1[1:,:], '%.4f')
# np.savetxt('./all_trajs_seg_2.txt', all_trajs_seg_2[1:,:], '%.4f')
np.savetxt('./all_trajs_seq_length_1.txt', np.array(count0), '%d')
np.savetxt('./all_trajs_seq_length_2.txt', np.array(count1), '%d')
#data_process()
#data_matrix = np.loadtxt('./data_matrix.txt')
#divide_data(data_matrix=data_matrix, segment_length=30)
###############################################
#check_data()
###############################################
def plot_vehicles(case_id, data_matrix):
"""
    Plot the trajectories of both vehicles as bounding boxes, one figure per
    frame, with opacity fading for later frames.
"""
current_case_data = data_matrix[np.where(data_matrix[:,0]==case_id)]
T = current_case_data.shape[0]
fig = plt.figure(figsize=(20,2))
for i in range(T):
if i<10:
name='00'+str(i)
elif i>=10 and i<100:
name = '0'+str(i)
elif i>=100:
name = str(i)
ax = fig.add_subplot(111, aspect='equal')
ax.add_patch(
patches.Rectangle(
(current_case_data[i,2]-2.0, current_case_data[i,4]-0.9), # (x,y)
4.0, # width
1.8, # height
alpha = 0.3 + 0.7*(T-i) / float(T),
facecolor='blue',
edgecolor='black',
linewidth=0.5
)
)
ax.add_patch(
patches.Rectangle(
(current_case_data[i,6]-2.0, current_case_data[i,8]-0.9), # (x,y)
4.0, # width
1.8, # height
alpha = 0.3 + 0.7*(T-i) / float(T),
facecolor='red',
edgecolor='black',
linewidth=0.5
)
)
ax.plot(range(-805,-360),-605*np.ones(445), color='k',linewidth=1)
ax.plot(range(-805,-584),-610*np.ones(221), color='k',linewidth=1)
ax.plot(range(-445,-360),-610*np.ones(85), color='k',linewidth=1)
x = [[-584,-805],[-445,-805]]
y = [[-610,-618],[-610,-622]]
for l in range(len(x)):
ax.plot(x[l], y[l], color='k',linewidth=1)
ax.set_xlim(-680, -400)
ax.set_ylim(-620, -600)
ax.set_xticks([])
ax.set_yticks([])
fig.savefig('./vehicles_plot/'+str(case_id)+'_'+str(name)+'.png', bbox_inches='tight')
data_matrix = np.loadtxt('./data_matrix.txt')
plot_vehicles(case_id=8, data_matrix=data_matrix)
| 29.136364 | 89 | 0.623375 |
86545fd84ae7762d72208edf0f23289ff9f754a1 | 4,660 | py | Python | balancesheet/equityManager.py | tylertjburns/ledgerkeeper | cd69e9f48f35a973d08e450dfffdfea46bdc3802 | [
"MIT"
] | null | null | null | balancesheet/equityManager.py | tylertjburns/ledgerkeeper | cd69e9f48f35a973d08e450dfffdfea46bdc3802 | [
"MIT"
] | null | null | null | balancesheet/equityManager.py | tylertjburns/ledgerkeeper | cd69e9f48f35a973d08e450dfffdfea46bdc3802 | [
"MIT"
] | null | null | null | import balancesheet.mongoData.equities_data_service as dsvce
from userInteraction.financeCliInteraction import FinanceCliInteraction
import ledgerkeeper.mongoData.account_data_service as dsvca
from balancesheet.enums import EquityClass, AssetType, LiabiltyType, EquityTimeHorizon, EquityStatus, EquityContingency
import plotter as plot
| 43.551402 | 119 | 0.65279 |
8654bfcdcac1a02c8bb6c10945f68a8c090f869f | 27,781 | py | Python | pysnmp-with-texts/CISCO-DOT11-QOS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/CISCO-DOT11-QOS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/CISCO-DOT11-QOS-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CISCO-DOT11-QOS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-DOT11-QOS-MIB
# Produced by pysmi-0.3.4 at Wed May 1 11:55:50 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, ConstraintsIntersection, ConstraintsUnion, ValueRangeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueRangeConstraint", "SingleValueConstraint")
CDot11IfVlanIdOrZero, = mibBuilder.importSymbols("CISCO-DOT11-IF-MIB", "CDot11IfVlanIdOrZero")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Bits, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter64, Counter32, ModuleIdentity, NotificationType, Unsigned32, IpAddress, MibIdentifier, iso, TimeTicks, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter64", "Counter32", "ModuleIdentity", "NotificationType", "Unsigned32", "IpAddress", "MibIdentifier", "iso", "TimeTicks", "Integer32")
TextualConvention, DisplayString, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "TruthValue")
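# NOTE: the Cdot11QosTrafficClass textual convention is referenced and
# exported below, but its definition was not preserved in this copy of the
# module. The class here is a hedged reconstruction; the value range and
# named values are assumptions, not taken from the original MIB source.
class Cdot11QosTrafficClass(TextualConvention, Integer32):
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3))
    namedValues = NamedValues(("background", 0), ("bestEffort", 1), ("video", 2), ("voice", 3))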
ciscoDot11QosMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 416))
ciscoDot11QosMIB.setRevisions(('2006-05-09 00:00', '2003-11-24 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: ciscoDot11QosMIB.setRevisionsDescriptions(('The DEFVAL clauses have been removed from the definition of the objects cdot11QosCWmin, cdot11QosCWmax, cdot11QosMaxRetry and cdot11QosBackoffOffset, as the default values for these objects depend on the different traffic classes and that there are no common default values across the different traffic classes. ', 'Initial version of this MIB module.',))
if mibBuilder.loadTexts: ciscoDot11QosMIB.setLastUpdated('200605090000Z')
if mibBuilder.loadTexts: ciscoDot11QosMIB.setOrganization('Cisco Systems Inc.')
if mibBuilder.loadTexts: ciscoDot11QosMIB.setContactInfo(' Cisco Systems Customer Service Postal: 170 West Tasman Drive, San Jose CA 95134-1706. USA Tel: +1 800 553-NETS E-mail: [email protected]')
if mibBuilder.loadTexts: ciscoDot11QosMIB.setDescription('This MIB module provides network management support for QoS on wireless LAN devices. The objects defined in this MIB provide equivalent support as the objects in the IEEE 802.11E Standard draft. The original names of the objects in the standard are included in the REFERENCE clauses. GLOSSARY and ACRONYMS Access point (AP) Transmitter/receiver (transceiver) device that commonly connects and transports data between a wireless network and a wired network. AIFS Arbitration Interframe Space. It is one of the five different IFSs defined to provide priority levels for access to the wireless media. It shall be used by QSTAs to transmit data type frames (MPDUs) and management type frames (MMPDUs). BSS IEEE 802.11 Basic Service Set (Radio Cell). The BSS of an AP comprises of the stations directly associating with the AP. CW Contention Window. It is the time period between radio signal collisions caused by simultaneous broadcast from multiple wireless stations. The contention window is used to compute the random backoff of the radio broadcast. The IEEE 802.11b does not specify the unit for the time period. CWP Factor Contention Window Persistence Factor. It indicates the factor used in computing new CW values on every 15 unsuccessful attempt to transmit an MPDU or an MMPDU of a traffic class. It is a scaling factor in units of 1/16ths. IFS Inter-Frame Space is the time interval between frames. A STA shall determine that the medium is idle through the use of the carrier sense function for the interval specified. In other words, the size of the IFS determines the length of the backoff time interval of a device to the medium. In this case, the medium is the radio wave spectrum. The IEEE 802.11b standard does not specify any unit for the time interval. BSS IEEE 802.11 Basic Service Set (Radio Cell). The MAC Medium Access Control. Layer 2 in the network model. MPDU MAC protocol data unit. The unit of data exchanged between two peer MAC entities using the services of the physical layer (PHY). MMPDU Management type MAC protocol data unit. MSDU MAC service data unit. Information that is delivered as a unit between MAC service access points. QBSS Quality of service basic service set. QSTA QoS station. STA (WSTA) A non-AP IEEE 802.11 wireless station.')
ciscoDot11QosMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 0))
ciscoDot11QosMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 1))
ciscoDot11QosMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 2))
ciscoDot11QosConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1))
ciscoDot11QosQueue = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 2))
ciscoDot11QosStatistics = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3))
ciscoDot11QosNotifControl = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 4))
cdot11QosConfigTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1), )
if mibBuilder.loadTexts: cdot11QosConfigTable.setStatus('current')
if mibBuilder.loadTexts: cdot11QosConfigTable.setDescription('This table contains the basic set of attributes to configure QoS queues for radio interfaces of a wireless LAN device. This table has an expansion dependent relationship with the ifTable. Each IEEE 802.11 wireless interface has different outbound queues for different network traffic class. For each entry in this table, there exists an entry in the ifTable of ifType ieee80211(71).')
cdot11QosConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-DOT11-QOS-MIB", "cdot11TrafficQueue"))
if mibBuilder.loadTexts: cdot11QosConfigEntry.setStatus('current')
if mibBuilder.loadTexts: cdot11QosConfigEntry.setDescription('Each entry contains parameters to configure traffic contention window, AIFS, priority and MSDU lifetime for each traffic queue on an IEEE 802.11 interface.')
cdot11TrafficQueue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1, 1, 1), Unsigned32())
if mibBuilder.loadTexts: cdot11TrafficQueue.setStatus('current')
if mibBuilder.loadTexts: cdot11TrafficQueue.setDescription('This is the index to the outbound traffic queue on the radio interface.')
cdot11TrafficClass = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1, 1, 2), Cdot11QosTrafficClass()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11TrafficClass.setStatus('current')
if mibBuilder.loadTexts: cdot11TrafficClass.setDescription('This object specifies the traffic class and priority for the traffic on this queue.')
cdot11QosCWmin = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdot11QosCWmin.setReference('dot11CWmin, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosCWmin.setStatus('current')
if mibBuilder.loadTexts: cdot11QosCWmin.setDescription('This object defines the minimum contention window value for a traffic class. The minimum contention window is 2 to the power of cdot11QosCWmin minus 1, and that is from 0 to 1023. The cdot11QosCWmin value must be less than or equal to cdot11QosCWmax.')
cdot11QosCWmax = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 10))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdot11QosCWmax.setReference('dot11CWmax, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosCWmax.setStatus('current')
if mibBuilder.loadTexts: cdot11QosCWmax.setDescription('This object defines the maximum contention window value for a traffic class. The maximum contention window is 2 to the power of cdot11QosCWmax minus 1, and that is from 0 to 1023. The cdot11QosCWmax value must be greater than or equal to cdot11QosCWmin.')
cdot11QosBackoffOffset = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1, 1, 5), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 20))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdot11QosBackoffOffset.setStatus('current')
if mibBuilder.loadTexts: cdot11QosBackoffOffset.setDescription('This specifies the offset of the radio backoff from the transmission media for this traffic class. The backoff interval of a radio is calculated from a pseudo random integer drawn from a uniform distribution over the interval determined by the maximum and minimum of the contention window.')
cdot11QosMaxRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdot11QosMaxRetry.setStatus('current')
if mibBuilder.loadTexts: cdot11QosMaxRetry.setDescription('This specifies the number of times the radio retries for a particular transmission if there is a collision for the media.')
cdot11QosSupportTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 2), )
if mibBuilder.loadTexts: cdot11QosSupportTable.setStatus('current')
if mibBuilder.loadTexts: cdot11QosSupportTable.setDescription('This table contains the attributes indicating QoS support information on the IEEE 802.11 interfaces of this device. This table has a sparse dependent relationship with the ifTable. For each entry in this table, there exists an entry in the ifTable of ifType ieee80211(71).')
cdot11QosSupportEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cdot11QosSupportEntry.setStatus('current')
if mibBuilder.loadTexts: cdot11QosSupportEntry.setDescription('Each entry contains attributes to indicate if QoS and priority queue are supported for an IEEE 802.11 interface.')
cdot11QosOptionImplemented = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 2, 1, 1), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosOptionImplemented.setReference('dot11QosOptionImplemented, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosOptionImplemented.setStatus('current')
if mibBuilder.loadTexts: cdot11QosOptionImplemented.setDescription('This object indicates if QoS is implemented on this IEEE 802.11 network interface.')
cdot11QosOptionEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 2, 1, 2), TruthValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosOptionEnabled.setStatus('current')
if mibBuilder.loadTexts: cdot11QosOptionEnabled.setDescription("This object indicates if QoS is enabled on this IEEE 802.11 network interface. If it is 'true', QoS queuing is ON and traffic are prioritized according to their traffic class. If it is 'false', there is no QoS queuing and traffic are not prioritized.")
cdot11QosQueuesAvailable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 2, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(4, 64))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosQueuesAvailable.setReference('dot11QueuesAvailable, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosQueuesAvailable.setStatus('current')
if mibBuilder.loadTexts: cdot11QosQueuesAvailable.setDescription('This object shows the number of QoS priority queues are available on this IEEE 802.11 network interface. That is the number of queue per interface in the cdot11QosConfigTable.')
cdot11QosQueueTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 2, 1), )
if mibBuilder.loadTexts: cdot11QosQueueTable.setStatus('current')
if mibBuilder.loadTexts: cdot11QosQueueTable.setDescription('This table contains the queue weight and size information and statistics for each traffic queue on each the IEEE 802.11 interface. This table has a sparse dependent relationship with the ifTable. For each entry in this table, there exists an entry in the ifTable of ifType ieee80211(71).')
cdot11QosQueueEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-DOT11-QOS-MIB", "cdot11TrafficQueue"))
if mibBuilder.loadTexts: cdot11QosQueueEntry.setStatus('current')
if mibBuilder.loadTexts: cdot11QosQueueEntry.setDescription('Each entry contains the current queue weight, size, and peak size information for each traffic queue on an IEEE 802.11 interface.')
cdot11QosQueueQuota = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 2, 1, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosQueueQuota.setStatus('current')
if mibBuilder.loadTexts: cdot11QosQueueQuota.setDescription('This is the current QoS priority queue packet quota for this queue on the overall bandwidth. The total available quota is platform dependent and is shared among all the transmitting queues. The queue with the largest quota value has the largest share of the overall bandwidth of the radio. The quota is allocated by the radio driver dynamically.')
cdot11QosQueueSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 2, 1, 1, 2), Gauge32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosQueueSize.setReference('dot11QueueSizeTC, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosQueueSize.setStatus('current')
if mibBuilder.loadTexts: cdot11QosQueueSize.setDescription('This is the current QoS priority queue size for this queue.')
cdot11QosQueuePeakSize = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 2, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosQueuePeakSize.setReference('dot11QueuePeakSizeTC, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosQueuePeakSize.setStatus('current')
if mibBuilder.loadTexts: cdot11QosQueuePeakSize.setDescription('This is the peak QoS priority queue size for this queue.')
cdot11QosStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 1), )
if mibBuilder.loadTexts: cdot11QosStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: cdot11QosStatisticsTable.setDescription('This table contains the QoS statistics by traffic queue on each the IEEE 802.11 network interface. This table has a expansion dependent relationship with the ifTable. For each entry in this table, there exists an entry in the ifTable of ifType ieee80211(71).')
cdot11QosStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-DOT11-QOS-MIB", "cdot11TrafficQueue"))
if mibBuilder.loadTexts: cdot11QosStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: cdot11QosStatisticsEntry.setDescription('Each entry contain QoS statistics for data transmission and receive for each traffic queue on an IEEE 802.11 interface.')
cdot11QosDiscardedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosDiscardedFrames.setReference('dot11QosDiscardedFrameCountTC, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosDiscardedFrames.setStatus('current')
if mibBuilder.loadTexts: cdot11QosDiscardedFrames.setDescription('This is the counter for QoS discarded frames transmitting from this IEEE 802.11 interface for the traffic queue.')
cdot11QosFails = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosFails.setReference('dot11QosFailedCountTC, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosFails.setStatus('current')
if mibBuilder.loadTexts: cdot11QosFails.setDescription('This is the counter for QoS failures on this IEEE 802.11 interface for the traffic queue.')
cdot11QosRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosRetries.setReference('dot11QosRetryCountTC, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosRetries.setStatus('current')
if mibBuilder.loadTexts: cdot11QosRetries.setDescription('This is the counter for QoS retries performed on this IEEE 802.11 interface for the traffic queue.')
cdot11QosMutipleRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosMutipleRetries.setReference('dot11QosMutipleRetryCountTC, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosMutipleRetries.setStatus('current')
if mibBuilder.loadTexts: cdot11QosMutipleRetries.setDescription('This is the counter for QoS multiple retries performed on this IEEE 802.11 interface for the traffic queue.')
cdot11QosTransmittedFrames = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosTransmittedFrames.setReference('dot11QosTransmittedFrameCountTC, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosTransmittedFrames.setStatus('current')
if mibBuilder.loadTexts: cdot11QosTransmittedFrames.setDescription('This is the counter for QoS frames transmitted from this IEEE 802.11 interface for the traffic queue.')
cdot11QosIfStatisticsTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 2), )
if mibBuilder.loadTexts: cdot11QosIfStatisticsTable.setStatus('current')
if mibBuilder.loadTexts: cdot11QosIfStatisticsTable.setDescription('This table contains the attributes indicating QoS statistics on the IEEE 802.11 interfaces of the device. This table has a sparse dependent relationship with the ifTable. For each entry in this table, there exists an entry in the ifTable of ifType ieee80211(71).')
cdot11QosIfStatisticsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cdot11QosIfStatisticsEntry.setStatus('current')
if mibBuilder.loadTexts: cdot11QosIfStatisticsEntry.setDescription('Each entry contains attributes to support QoS statistics on an IEEE 802.11 interface.')
cdot11QosIfDiscardedFragments = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 3, 2, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosIfDiscardedFragments.setReference('dot11QosDiscardedFragments, IEEE 802.11E-2001/D1.')
if mibBuilder.loadTexts: cdot11QosIfDiscardedFragments.setStatus('current')
if mibBuilder.loadTexts: cdot11QosIfDiscardedFragments.setDescription('This object counts the number of QoS discarded transmitting fragments on this radio interface.')
cdot11QosIfVlanTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 3), )
if mibBuilder.loadTexts: cdot11QosIfVlanTable.setStatus('current')
if mibBuilder.loadTexts: cdot11QosIfVlanTable.setDescription('This table maps VLANs to different traffic classes and defines their QoS properties. This table has an expansion dependent relationship with the ifTable. For each entry in this table, there exists an entry in the ifTable of ifType ieee80211(71).')
cdot11QosIfVlanEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-DOT11-QOS-MIB", "cdot11QosIfVlanId"))
if mibBuilder.loadTexts: cdot11QosIfVlanEntry.setStatus('current')
if mibBuilder.loadTexts: cdot11QosIfVlanEntry.setDescription('Each entry defines parameters determining the traffic class and QoS configuration of a VLAN.')
cdot11QosIfVlanId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 3, 1, 1), CDot11IfVlanIdOrZero().subtype(subtypeSpec=ValueRangeConstraint(1, 4095)))
if mibBuilder.loadTexts: cdot11QosIfVlanId.setStatus('current')
if mibBuilder.loadTexts: cdot11QosIfVlanId.setDescription('This object identifies the VLAN (1 to 4095) on this radio interface.')
cdot11QosIfVlanTrafficClass = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 1, 3, 1, 2), Cdot11QosTrafficClass()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdot11QosIfVlanTrafficClass.setStatus('current')
if mibBuilder.loadTexts: cdot11QosIfVlanTrafficClass.setDescription('This is the QoS traffic class for the traffic transmitting on this VLAN. The traffic class determines the priority for the VLAN.')
cdot11QosNotifEnabled = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 416, 1, 4, 1), TruthValue().clone('false')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cdot11QosNotifEnabled.setStatus('current')
if mibBuilder.loadTexts: cdot11QosNotifEnabled.setDescription('Indicates whether cdot11QosChangeNotif notification will or will not be sent by the agent when the QoS configuration in the cdot11QosConfigTable is changed.')
cdot11QosChangeNotif = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 416, 0, 1)).setObjects(("CISCO-DOT11-QOS-MIB", "cdot11TrafficClass"))
if mibBuilder.loadTexts: cdot11QosChangeNotif.setStatus('current')
if mibBuilder.loadTexts: cdot11QosChangeNotif.setDescription('This notification will be sent when the QoS configuration in the cdot11QosConfigTable is changed. The object cdot11TrafficClass specifies the traffic class of which a queue is configured. The sending of these notifications can be enabled or disabled via cdot11QosNotifEnabled.')
ciscoDot11QosMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 2, 1))
ciscoDot11QosMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 416, 2, 2))
ciscoDot11QosMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 416, 2, 1, 1)).setObjects(("CISCO-DOT11-QOS-MIB", "ciscoDot11QosConfigGroup"), ("CISCO-DOT11-QOS-MIB", "ciscoDot11QosStatsGroup"), ("CISCO-DOT11-QOS-MIB", "ciscoDot11QosNotifControlGroup"), ("CISCO-DOT11-QOS-MIB", "ciscoDot11QosNotificationGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDot11QosMIBCompliance = ciscoDot11QosMIBCompliance.setStatus('current')
if mibBuilder.loadTexts: ciscoDot11QosMIBCompliance.setDescription('The compliance statement for the configuration and status groups.')
ciscoDot11QosConfigGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 416, 2, 2, 1)).setObjects(("CISCO-DOT11-QOS-MIB", "cdot11TrafficClass"), ("CISCO-DOT11-QOS-MIB", "cdot11QosCWmin"), ("CISCO-DOT11-QOS-MIB", "cdot11QosCWmax"), ("CISCO-DOT11-QOS-MIB", "cdot11QosBackoffOffset"), ("CISCO-DOT11-QOS-MIB", "cdot11QosMaxRetry"), ("CISCO-DOT11-QOS-MIB", "cdot11QosOptionImplemented"), ("CISCO-DOT11-QOS-MIB", "cdot11QosOptionEnabled"), ("CISCO-DOT11-QOS-MIB", "cdot11QosQueuesAvailable"), ("CISCO-DOT11-QOS-MIB", "cdot11QosQueueQuota"), ("CISCO-DOT11-QOS-MIB", "cdot11QosQueueSize"), ("CISCO-DOT11-QOS-MIB", "cdot11QosQueuePeakSize"), ("CISCO-DOT11-QOS-MIB", "cdot11QosIfVlanTrafficClass"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDot11QosConfigGroup = ciscoDot11QosConfigGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoDot11QosConfigGroup.setDescription('Configurations for IEEE 802.11 QoS.')
ciscoDot11QosStatsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 416, 2, 2, 2)).setObjects(("CISCO-DOT11-QOS-MIB", "cdot11QosIfDiscardedFragments"), ("CISCO-DOT11-QOS-MIB", "cdot11QosDiscardedFrames"), ("CISCO-DOT11-QOS-MIB", "cdot11QosFails"), ("CISCO-DOT11-QOS-MIB", "cdot11QosRetries"), ("CISCO-DOT11-QOS-MIB", "cdot11QosMutipleRetries"), ("CISCO-DOT11-QOS-MIB", "cdot11QosTransmittedFrames"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDot11QosStatsGroup = ciscoDot11QosStatsGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoDot11QosStatsGroup.setDescription('Status and statistics for IEEE 802.11 QoS.')
ciscoDot11QosNotifControlGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 416, 2, 2, 3)).setObjects(("CISCO-DOT11-QOS-MIB", "cdot11QosNotifEnabled"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDot11QosNotifControlGroup = ciscoDot11QosNotifControlGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoDot11QosNotifControlGroup.setDescription('Notification control configuration for QoS.')
ciscoDot11QosNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 416, 2, 2, 4)).setObjects(("CISCO-DOT11-QOS-MIB", "cdot11QosChangeNotif"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoDot11QosNotificationGroup = ciscoDot11QosNotificationGroup.setStatus('current')
if mibBuilder.loadTexts: ciscoDot11QosNotificationGroup.setDescription('Notifications for QoS configuration.')
mibBuilder.exportSymbols("CISCO-DOT11-QOS-MIB", PYSNMP_MODULE_ID=ciscoDot11QosMIB, cdot11QosQueueTable=cdot11QosQueueTable, cdot11QosCWmin=cdot11QosCWmin, ciscoDot11QosMIBObjects=ciscoDot11QosMIBObjects, cdot11QosIfVlanTable=cdot11QosIfVlanTable, cdot11QosIfVlanId=cdot11QosIfVlanId, cdot11QosStatisticsTable=cdot11QosStatisticsTable, ciscoDot11QosQueue=ciscoDot11QosQueue, ciscoDot11QosStatistics=ciscoDot11QosStatistics, cdot11QosRetries=cdot11QosRetries, cdot11QosQueuesAvailable=cdot11QosQueuesAvailable, cdot11QosFails=cdot11QosFails, cdot11QosOptionEnabled=cdot11QosOptionEnabled, cdot11QosStatisticsEntry=cdot11QosStatisticsEntry, cdot11TrafficQueue=cdot11TrafficQueue, ciscoDot11QosMIBCompliance=ciscoDot11QosMIBCompliance, ciscoDot11QosMIBCompliances=ciscoDot11QosMIBCompliances, cdot11QosIfStatisticsTable=cdot11QosIfStatisticsTable, cdot11QosIfDiscardedFragments=cdot11QosIfDiscardedFragments, cdot11QosMaxRetry=cdot11QosMaxRetry, cdot11QosMutipleRetries=cdot11QosMutipleRetries, ciscoDot11QosMIB=ciscoDot11QosMIB, cdot11QosQueueQuota=cdot11QosQueueQuota, ciscoDot11QosMIBConformance=ciscoDot11QosMIBConformance, cdot11QosConfigTable=cdot11QosConfigTable, cdot11QosCWmax=cdot11QosCWmax, cdot11QosConfigEntry=cdot11QosConfigEntry, cdot11QosQueueSize=cdot11QosQueueSize, cdot11QosIfVlanEntry=cdot11QosIfVlanEntry, cdot11TrafficClass=cdot11TrafficClass, ciscoDot11QosStatsGroup=ciscoDot11QosStatsGroup, ciscoDot11QosConfig=ciscoDot11QosConfig, ciscoDot11QosNotifControl=ciscoDot11QosNotifControl, cdot11QosSupportEntry=cdot11QosSupportEntry, cdot11QosSupportTable=cdot11QosSupportTable, ciscoDot11QosMIBGroups=ciscoDot11QosMIBGroups, cdot11QosBackoffOffset=cdot11QosBackoffOffset, ciscoDot11QosConfigGroup=ciscoDot11QosConfigGroup, cdot11QosTransmittedFrames=cdot11QosTransmittedFrames, cdot11QosQueueEntry=cdot11QosQueueEntry, ciscoDot11QosNotifControlGroup=ciscoDot11QosNotifControlGroup, ciscoDot11QosNotificationGroup=ciscoDot11QosNotificationGroup, ciscoDot11QosMIBNotifs=ciscoDot11QosMIBNotifs, cdot11QosIfStatisticsEntry=cdot11QosIfStatisticsEntry, cdot11QosNotifEnabled=cdot11QosNotifEnabled, cdot11QosChangeNotif=cdot11QosChangeNotif, cdot11QosOptionImplemented=cdot11QosOptionImplemented, cdot11QosIfVlanTrafficClass=cdot11QosIfVlanTrafficClass, Cdot11QosTrafficClass=Cdot11QosTrafficClass, cdot11QosQueuePeakSize=cdot11QosQueuePeakSize, cdot11QosDiscardedFrames=cdot11QosDiscardedFrames)
| 156.073034 | 2,406 | 0.791908 |
865535238f10c51c669114bcf29b3699dd34b1e8 | 559 | py | Python | examples/django/hello_world/wsgi.py | liuyu81/SnapSearch-Client-Python | 41857806c2b26f0537de2dcc23a145107a4ecd04 | [
"MIT"
] | null | null | null | examples/django/hello_world/wsgi.py | liuyu81/SnapSearch-Client-Python | 41857806c2b26f0537de2dcc23a145107a4ecd04 | [
"MIT"
] | null | null | null | examples/django/hello_world/wsgi.py | liuyu81/SnapSearch-Client-Python | 41857806c2b26f0537de2dcc23a145107a4ecd04 | [
"MIT"
] | 1 | 2018-03-04T20:24:14.000Z | 2018-03-04T20:24:14.000Z | import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hello_world.settings")
# django WSGI application
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# load SnapSearch API credentials
api_email = "<email>"
api_key = "<key>"
# initialize the interceptor
from SnapSearch import Client, Detector, Interceptor
interceptor = Interceptor(Client(api_email, api_key), Detector())
# deploy the interceptor
from SnapSearch.wsgi import InterceptorMiddleware
application = InterceptorMiddleware(application, interceptor)
| 27.95 | 71 | 0.815742 |
8655870bbe029c575ef810e01964410eb82d6a13 | 10,603 | py | Python | confluent_server/confluent/syncfiles.py | xcat2/confluent | 47a83f4628df48638c2aebbfbcddc1531aac20d0 | [
"Apache-2.0"
] | 27 | 2015-02-11T13:56:46.000Z | 2021-12-28T14:17:20.000Z | confluent_server/confluent/syncfiles.py | jjohnson42/confluent | 47a83f4628df48638c2aebbfbcddc1531aac20d0 | [
"Apache-2.0"
] | 32 | 2015-09-23T13:19:04.000Z | 2022-03-15T13:50:45.000Z | confluent_server/confluent/syncfiles.py | xcat2/confluent | 47a83f4628df48638c2aebbfbcddc1531aac20d0 | [
"Apache-2.0"
] | 24 | 2015-07-14T20:41:55.000Z | 2021-07-15T04:18:51.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2021 Lenovo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import shutil
import tempfile
import confluent.sshutil as sshutil
import confluent.util as util
import confluent.noderange as noderange
import eventlet
import pwd
import grp
def sync_list_to_node(sl, node, suffixes):
targdir = tempfile.mkdtemp('.syncto{}'.format(node))
output = ''
try:
for ent in sl.replacemap:
stage_ent(sl.replacemap, ent, targdir)
if 'append' in suffixes:
while suffixes['append'] and suffixes['append'][0] == '/':
suffixes['append'] = suffixes['append'][1:]
for ent in sl.appendmap:
stage_ent(sl.appendmap, ent,
os.path.join(targdir, suffixes['append']))
if 'merge' in suffixes:
while suffixes['merge'] and suffixes['merge'][0] == '/':
suffixes['merge'] = suffixes['merge'][1:]
for ent in sl.mergemap:
stage_ent(sl.mergemap, ent,
os.path.join(targdir, suffixes['merge']), True)
if 'appendonce' in suffixes:
while suffixes['appendonce'] and suffixes['appendonce'][0] == '/':
suffixes['appendonce'] = suffixes['appendonce'][1:]
for ent in sl.appendoncemap:
stage_ent(sl.appendoncemap, ent,
os.path.join(targdir, suffixes['appendonce']), True)
sshutil.prep_ssh_key('/etc/confluent/ssh/automation')
output = util.run(
['rsync', '-rvLD', targdir + '/', 'root@{}:/'.format(node)])[0]
except Exception as e:
if 'CalledProcessError' not in repr(e):
# https://github.com/eventlet/eventlet/issues/413
# for some reason, can't catch the calledprocesserror normally
# for this exception, implement a hack workaround
raise
unreadablefiles = []
for root, dirnames, filenames in os.walk(targdir):
for filename in filenames:
filename = os.path.join(root, filename)
try:
with open(filename, 'r') as _:
pass
except OSError as e:
unreadablefiles.append(filename.replace(targdir, ''))
if unreadablefiles:
raise Exception("Syncing failed due to unreadable files: " + ','.join(unreadablefiles))
else:
raise
finally:
shutil.rmtree(targdir)
if not isinstance(output, str):
output = output.decode('utf8')
retval = {
'options': sl.optmap,
'output': output,
}
return retval # need dictionary with output and options
def stage_ent(currmap, ent, targdir, appendexist=False):
dst = currmap[ent]
everyfent = []
allfents = ent.split()
for tmpent in allfents:
fents = glob.glob(tmpent)
everyfent.extend(fents)
if not everyfent:
raise Exception('No matching files for "{}"'.format(ent))
if dst is None: # this is to indicate source and destination as one
dst = os.path.dirname(everyfent[0]) + '/'
while dst and dst[0] == '/':
dst = dst[1:]
if len(everyfent) > 1 and dst[-1] != '/':
raise Exception(
'Multiple files match {}, {} needs a trailing slash to indicate a directory'.format(ent, dst))
fulltarg = os.path.join(targdir, dst)
for targ in everyfent:
mkpathorlink(targ, fulltarg, appendexist)
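# NOTE: mkpathorlink() is used by stage_ent() above, but its definition was
# not preserved in this copy of the file. The sketch below is a hedged
# reconstruction of the staging helper: copy a file or directory tree into
# the temporary sync directory, optionally appending to existing files.
def mkpathorlink(source, destination, appendexist=False):
    if destination.endswith('/'):
        # A trailing slash means "stage into this directory".
        destination = os.path.join(destination, os.path.basename(source))
    if os.path.isdir(source):
        if not os.path.exists(destination):
            os.makedirs(destination)
        for ent in os.listdir(source):
            mkpathorlink(os.path.join(source, ent),
                         os.path.join(destination, ent), appendexist)
    elif appendexist and os.path.exists(destination):
        with open(source, 'rb') as src, open(destination, 'ab') as dst:
            dst.write(src.read())
    else:
        if not os.path.exists(os.path.dirname(destination)):
            os.makedirs(os.path.dirname(destination))
        shutil.copy2(source, destination)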
syncrunners = {}
| 37.334507 | 106 | 0.529661 |
86559f8329a6ab4177af7e36ab701bd44241c349 | 1,804 | py | Python | fym/models/missile.py | JungYT/fym | d519c50086e3c7793b960e0326c92ed407836790 | [
"MIT"
] | 14 | 2019-08-23T10:02:39.000Z | 2021-12-24T13:04:43.000Z | fym/models/missile.py | JungYT/fym | d519c50086e3c7793b960e0326c92ed407836790 | [
"MIT"
] | 110 | 2019-08-23T08:09:32.000Z | 2021-06-29T06:54:48.000Z | fym/models/missile.py | JungYT/fym | d519c50086e3c7793b960e0326c92ed407836790 | [
"MIT"
] | 10 | 2019-09-02T03:49:06.000Z | 2021-05-10T04:35:40.000Z | import numpy as np
from fym.core import BaseSystem
| 25.408451 | 68 | 0.444568 |
8655ba3bbd3cf852e91a43d33c2f2f32d558bc09 | 2,175 | py | Python | egg/zoo/addition/data.py | chengemily/EGG | 40e84228e9d6e9ae785c0e4a846bb7e12e2b9291 | [
"MIT"
] | 1 | 2022-03-01T18:57:48.000Z | 2022-03-01T18:57:48.000Z | egg/zoo/addition/data.py | chengemily/EGG | 40e84228e9d6e9ae785c0e4a846bb7e12e2b9291 | [
"MIT"
] | null | null | null | egg/zoo/addition/data.py | chengemily/EGG | 40e84228e9d6e9ae785c0e4a846bb7e12e2b9291 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable, Optional, Tuple
import torch
from torch.utils.data import DataLoader
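# NOTE: enumerate_dataset() and ScaledDataset are used below, but their
# definitions were not preserved in this copy of the file. Both sketches are
# hedged reconstructions inferred from the call sites: enumerate every
# (x, y) pair of addends as a concatenated pair of one-hot vectors labelled
# with their sum, and repeat a dataset by a constant scaling factor.
def enumerate_dataset(input_size):
    data = []
    for x in range(input_size):
        for y in range(input_size):
            sender_input = torch.zeros(2 * input_size)
            sender_input[x] = 1.0
            sender_input[input_size + y] = 1.0
            data.append((sender_input, torch.tensor(x + y)))
    return data

class ScaledDataset(torch.utils.data.Dataset):
    def __init__(self, examples, scaling_factor=1):
        self.examples = examples
        self.scaling_factor = scaling_factor

    def __len__(self):
        return len(self.examples) * self.scaling_factor

    def __getitem__(self, k):
        return self.examples[k % len(self.examples)]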
def get_dataloaders(opts) -> Tuple[Iterable[
Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]
], Iterable[
Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]
]]:
"Returning an iterator for tuple(sender_input, labels, receiver_input)."
full_data = enumerate_dataset(opts.input_size)
len_train = int(opts.training_density * len(full_data))
train_set, holdout_set = torch.utils.data.random_split(full_data,
[len_train, len(full_data) - len_train]
)
validation_set = train_set
train_set = ScaledDataset(train_set, opts.data_scaler)
train_loader, validation_loader, holdout_loader = DataLoader(train_set, batch_size=opts.batch_size, shuffle=True), \
DataLoader(validation_set, batch_size=len(validation_set)), \
DataLoader(holdout_set, batch_size=opts.batch_size)
return train_loader, validation_loader, holdout_loader
| 32.462687 | 120 | 0.624368 |
8657657d7e720812db7b2b5c2b5e580159afbea0 | 564 | py | Python | mathfun/lexographic.py | lsbardel/mathfun | 98e7c210409c2b5777e91059c3651cef4f3045dd | [
"BSD-3-Clause"
] | null | null | null | mathfun/lexographic.py | lsbardel/mathfun | 98e7c210409c2b5777e91059c3651cef4f3045dd | [
"BSD-3-Clause"
] | null | null | null | mathfun/lexographic.py | lsbardel/mathfun | 98e7c210409c2b5777e91059c3651cef4f3045dd | [
"BSD-3-Clause"
] | null | null | null | """
Next lexicographical permutation algorithm
https://www.nayuki.io/page/next-lexicographical-permutation-algorithm
"""
| 25.636364 | 69 | 0.439716 |
8657acef2a48725b54eda761add6bd9a28ac1231 | 3,379 | py | Python | simulation-web3py/utility.py | miker83z/cloud-chain | 0f5c43159544da547173ee0425e78bede261513b | [
"MIT"
] | null | null | null | simulation-web3py/utility.py | miker83z/cloud-chain | 0f5c43159544da547173ee0425e78bede261513b | [
"MIT"
] | null | null | null | simulation-web3py/utility.py | miker83z/cloud-chain | 0f5c43159544da547173ee0425e78bede261513b | [
"MIT"
] | 1 | 2022-01-27T14:18:24.000Z | 2022-01-27T14:18:24.000Z | import json
import os
from argparse import ArgumentTypeError
from eth_typing import Address
from web3.contract import Contract
from settings import MIN_VAL, MAX_VAL, DEPLOYED_CONTRACTS, CONFIG_DIR
def range_limited_val(arg: str) -> int:
"""
Type function for argparse - int within some predefined bounds.
"""
try:
s = int(arg)
except ValueError:
raise ArgumentTypeError("must be a int number")
if s < MIN_VAL or s > MAX_VAL:
raise ArgumentTypeError(f"argument must be > {str(MIN_VAL)} and < {str(MAX_VAL)}")
return s
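# NOTE: the rest of this module was truncated in this copy. The helper below
# is a hedged sketch of the kind of companion utility the imports above
# suggest (reading a compiled contract's ABI from CONFIG_DIR); the function
# name and file layout are assumptions, not taken from the original source.
def get_contract(w3, name: str, address: Address) -> Contract:
    with open(os.path.join(CONFIG_DIR, f'{name}.json')) as file:
        contract_interface = json.load(file)
    return w3.eth.contract(address=address, abi=contract_interface['abi'])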
| 32.805825 | 106 | 0.612607 |
8657d90fe7092bbdb91cfe26101bae5ad4366000 | 808 | py | Python | migrations/versions/816ea3631582_add_topics.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 9 | 2021-01-12T07:28:30.000Z | 2021-12-30T09:27:04.000Z | migrations/versions/816ea3631582_add_topics.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 16 | 2021-03-28T16:31:42.000Z | 2022-03-21T00:18:30.000Z | migrations/versions/816ea3631582_add_topics.py | OpenASL/HowSignBot | bd9c5bc0edfd6fb50bdce7c7c1d84462e1e704c2 | [
"MIT"
] | 1 | 2021-07-18T20:49:19.000Z | 2021-07-18T20:49:19.000Z | """add topics
Revision ID: 816ea3631582
Revises: 37a124b0099b
Create Date: 2021-03-13 14:20:10.044131
"""
from alembic import op
import sqlalchemy as sa
import bot
# revision identifiers, used by Alembic.
revision = "816ea3631582"
down_revision = "37a124b0099b"
branch_labels = None
depends_on = None
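# NOTE: the upgrade()/downgrade() bodies were not preserved in this copy of
# the file. The revision message suggests a "topics" column or table was
# introduced here, but the exact DDL is unknown, so these are placeholder
# stubs only.
def upgrade():
    pass

def downgrade():
    pass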
| 23.085714 | 90 | 0.674505 |
86582bc3a8c357318983a8612ae2ca233e2c4562 | 3,137 | py | Python | src/Lib/importlib/__init__.py | NUS-ALSET/ace-react-redux-brython | d009490263c5716a145d9691cd59bfcd5aff837a | [
"MIT"
] | 1 | 2021-08-05T12:45:39.000Z | 2021-08-05T12:45:39.000Z | src/Lib/importlib/__init__.py | NUS-ALSET/ace-react-redux-brython | d009490263c5716a145d9691cd59bfcd5aff837a | [
"MIT"
] | null | null | null | src/Lib/importlib/__init__.py | NUS-ALSET/ace-react-redux-brython | d009490263c5716a145d9691cd59bfcd5aff837a | [
"MIT"
] | 1 | 2019-09-05T08:20:07.000Z | 2019-09-05T08:20:07.000Z | """A pure Python implementation of import."""
__all__ = ['__import__', 'import_module', 'invalidate_caches']
# Bootstrap help #####################################################
# Until bootstrapping is complete, DO NOT import any modules that attempt
# to import importlib._bootstrap (directly or indirectly). Since this
# partially initialised package would be present in sys.modules, those
# modules would get an uninitialised copy of the source version, instead
# of a fully initialised version (either the frozen one or the one
# initialised below if the frozen one is not available).
import _imp # Just the builtin component, NOT the full Python module
import sys
from . import machinery
from . import _bootstrap
_bootstrap._setup(sys, _imp)
# To simplify imports in test code
_w_long = _bootstrap._w_long
_r_long = _bootstrap._r_long
# Fully bootstrapped at this point, import whatever you like, circular
# dependencies and startup overhead minimisation permitting :)
# Public API #########################################################
from ._bootstrap import __import__
def invalidate_caches():
"""Call the invalidate_caches() method on all meta path finders stored in
sys.meta_path (where implemented)."""
for finder in sys.meta_path:
if hasattr(finder, 'invalidate_caches'):
finder.invalidate_caches()
def find_loader(name, path=None):
"""Find the loader for the specified module.
First, sys.modules is checked to see if the module was already imported. If
so, then sys.modules[name].__loader__ is returned. If that happens to be
set to None, then ValueError is raised. If the module is not in
sys.modules, then sys.meta_path is searched for a suitable loader with the
value of 'path' given to the finders. None is returned if no loader could
be found.
Dotted names do not have their parent packages implicitly imported. You will
most likely need to explicitly import all parent packages in the proper
order for a submodule to get the correct loader.
"""
try:
loader = sys.modules[name].__loader__
if loader is None:
raise ValueError('{}.__loader__ is None'.format(name))
else:
return loader
except KeyError:
pass
return _bootstrap._find_module(name, path)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
level = 0
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
for character in name:
if character != '.':
break
level += 1
return _bootstrap._gcd_import(name[level:], package, level)
#need at least one import hook for importlib stuff to work.
from . import basehook
sys.meta_path.append(basehook.BaseHook())
| 36.057471 | 81 | 0.668792 |
86589b5f56644ed9997dc3b47f7f98c31f2ddd04 | 8,848 | py | Python | lib/arlunio/arlunio/image.py | swyddfa/stylo | 4d6b348ce5812dc5c2554bfd21a1550375aa05e1 | [
"MIT"
] | null | null | null | lib/arlunio/arlunio/image.py | swyddfa/stylo | 4d6b348ce5812dc5c2554bfd21a1550375aa05e1 | [
"MIT"
] | 13 | 2019-06-02T21:26:52.000Z | 2019-08-04T15:54:41.000Z | lib/arlunio/arlunio/image.py | swyddfa/stylo | 4d6b348ce5812dc5c2554bfd21a1550375aa05e1 | [
"MIT"
] | 1 | 2019-07-08T17:00:56.000Z | 2019-07-08T17:00:56.000Z | from __future__ import annotations
import base64
import io
import logging
import pathlib
from typing import Optional
# TODO: Remove these, as they should be contained in the numpy backend.
import numpy as np
import PIL.Image as PImage
import arlunio.ast as ast
import arlunio.color as color
import arlunio.mask as mask
import arlunio.math as math
logger = logging.getLogger(__name__)
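# NOTE: the Image class itself was not preserved in this copy of the module,
# although everything below returns or accepts it. This minimal wrapper is a
# hedged reconstruction based on how it is used here: a thin proxy around a
# PIL image that supports the `+` compositing seen in the examples and
# delegates everything else (save, etc.) to the wrapped PIL image.
class Image:
    def __init__(self, img):
        self.img = img

    def __add__(self, other):
        # Composite `other` on top of a copy of this image (assumes RGBA).
        img = self.img.copy()
        img.alpha_composite(other.img)
        return Image(img)

    def __getattr__(self, name):
        return getattr(self.img, name)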
def new(color) -> Image:
"""Creates a new image with the given background color."""
return ast.Node.builtin(name="image", color=color)
def fromarray(*args, **kwargs):
"""Create an image from an array
See :func:`pillow:PIL.Image.fromarray`
"""
return Image(PImage.fromarray(*args, **kwargs))
def load(*args, **kwargs) -> Image:
"""Load an image from the given file.
See :func:`pillow:PIL.Image.open`
"""
return Image(PImage.open(*args, **kwargs))
def save(image: Image, filename: str, mkdirs: bool = False) -> None:
"""Save an image in PNG format.
:param filename: The filepath to save the image to.
:param mkdirs: If true, make any parent directories
"""
path = pathlib.Path(filename)
if not path.parent.exists() and mkdirs:
path.parent.mkdir(parents=True)
with open(filename, "wb") as f:
image.save(f)
def encode(image: Image) -> bytes:
"""Return the image encoded as a base64 string.
Parameters
----------
image:
The image to encode.
Example
-------
::
>>> import arlunio.image as image
>>> img = image.new((8, 8), color='red')
>>> image.encode(img)
b'iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAFklEQVR4nGP8z8DwnwEPYMInOXwUAAASWwIOH0pJXQAAAABJRU5ErkJggg=='
"""
with io.BytesIO() as byte_stream:
image.save(byte_stream, "PNG")
image_bytes = byte_stream.getvalue()
return base64.b64encode(image_bytes)
def decode(bytestring: bytes) -> Image:
"""Decode the image represented by the given bytestring into an image object.
Parameters
----------
bytestring:
The bytestring to decode.
Example
-------
.. arlunio-image:: Decode Example
:include-code:
::
import arlunio.image as image
bytestring = b'iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAYAAADED76LAAAAFklEQVR4nGP8z8DwnwEPYMInOXwUAAASWwIOH0pJXQAAAABJRU5ErkJggg==' # noqa: E501
img = image.decode(bytestring)
"""
data = base64.b64decode(bytestring)
bytes_ = io.BytesIO(data)
return Image(load(bytes_))
def colorramp(values, start: Optional[str] = None, stop: Optional[str] = None) -> Image:
"""Given a 2d array of values, produce an image gradient based on them.
.. arlunio-image:: Colorramp Demo
:align: right
::
import arlunio.image as image
import arlunio.math as math
import numpy as np
cartesian = math.Cartesian()
p = cartesian(width=256, height=256)
x, y = p[:, :, 0], p[:, :, 1]
values = np.sin(2*x*np.pi) * np.sin(2*y* np.pi)
img = image.colorramp(values)
First this function will scale the input array so that all values fall in the range
:math:`[0, 1]`. It will then produce an image with the same dimensions as the
original array. The color of each pixel will be chosen based on the corresponding
value of the scaled array.
- If the value is :math:`0` the color will be given by the :code:`start` parameter
- If the value is :math:`1` the color will be given by the :code:`stop` parameter
- Otherwise the color will be some mix between the two.
Parameters
----------
values:
The array of values used to decide on the color.
start:
The color to use for values near :math:`0` (default, :code:`black`)
stop:
The color to use for values near :math:`1` (default, :code:`white`)
Examples
--------
.. arlunio-image:: Colorramp Demo 2
:include-code:
::
import arlunio.image as image
import arlunio.math as math
import numpy as np
cartesian = math.Cartesian()
p = cartesian(width=256, height=256)
x = image.colorramp(p[:, :, 0], start="#0000", stop="#f007")
y = image.colorramp(p[:, :, 1], start="#0000", stop="#00f7")
img = x + y
"""
# Scale all the values so that they fall into the range [0, 1]
minx = np.min(values)
vs = np.array(values) - minx
vs = vs / np.max(vs)
if start is None:
start = "black"
if stop is None:
stop = "white"
start = color.getcolor(start, "RGBA")
stop = color.getcolor(stop, "RGBA")
funcs = [math.lerp(a, b) for a, b in zip(start, stop)]
channels = [np.floor(func(vs)) for func in funcs]
pixels = np.array(np.dstack(channels), dtype=np.uint8)
return fromarray(pixels)
def fill(
region,
foreground: Optional[str] = None,
background: Optional[str] = None,
image: Optional[Image] = None,
) -> Image:
"""Apply color to an image, as specified by a mask.
Parameters
----------
mask:
The mask that selects the region to be coloured
foreground:
A string representation of the color to use, this can be in any format that is
supported by the :mod:`pillow:PIL.ImageColor` module. If omitted this will
default to black.
background:
In the case where an existing image is not provided this parameter can be used
to set the background color of the generated image. This can be any string that
is accepted by the :mod:`pillow:PIL.ImageColor` module. If omitted this will
default to transparent
image:
The image to color in, if omitted a blank image will be used.
Example
--------
.. arlunio-image:: Fill Demo
:include-code:
::
import arlunio.image as image
import arlunio.shape as shape
circle = shape.Circle(x0=-0.5, y0=0.25, r=0.6)
img = image.fill(circle(width=512, height=256), foreground='red')
circle.x0, circle.y0 = 0, 0
img = image.fill(circle(width=512, height=256), foreground='#0f0', image=img)
circle.x0, circle.y0 = 0.5, -0.25
img = image.fill(circle(width=512, height=256), foreground='blue', image=img)
"""
foreground = "#000" if foreground is None else foreground
fill_color = color.getcolor(foreground, "RGBA")
if image is None:
background = "#0000" if background is None else background
image = new(color=background)
if not isinstance(region, ast.Node):
region = region()
return ast.Node.fill(image, region, fill_color)
| 26.570571 | 148 | 0.617315 |
865a20fd18fa17925d3611f9138e1d796448c4ce | 9,001 | py | Python | yamlable/tests/test_yamlable.py | smarie/python-yamlable | c726f5c56eea037968560ce83f9753bde1514991 | [
"BSD-3-Clause"
] | 27 | 2018-07-12T17:09:41.000Z | 2022-02-07T18:56:26.000Z | yamlable/tests/test_yamlable.py | smarie/python-yamlable | c726f5c56eea037968560ce83f9753bde1514991 | [
"BSD-3-Clause"
] | 14 | 2018-07-10T08:09:21.000Z | 2022-03-02T15:29:56.000Z | yamlable/tests/test_yamlable.py | smarie/python-yamlable | c726f5c56eea037968560ce83f9753bde1514991 | [
"BSD-3-Clause"
] | 1 | 2020-09-22T16:13:51.000Z | 2020-09-22T16:13:51.000Z | from copy import copy
try:
# Python 2 only:
from StringIO import StringIO
# create a variant that can serve as a context manager
except ImportError:
from io import StringIO
try: # python 3.5+
from typing import Dict, Any
from yamlable import Y
except ImportError:
pass
import pytest
from yaml import dump, load
from yamlable import YamlAble, yaml_info
def test_yamlable_incomplete_description():
""" Tests that if __yaml_tag_suffix__ is not provided a YamlAble subclass cannot be declared """
with pytest.raises(NotImplementedError) as err_info:
# instantiate
f = Foo()
# dump
f.dumps_yaml()
assert "does not seem to have a non-None '__yaml_tag_suffix__' field" in str(err_info.value)
def test_yamlable():
""" Tests that YamlAble works correctly """
# instantiate
    f = Foo(1, 'hello')
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.Foo
a: 1
b: hello
"""
# dump io
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == Foo.loads_yaml(y)
# load io
assert f == Foo.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
def test_yamlable_legacy_method_names():
""" Tests that YamlAbleMixIn works correctly """
global enc
global dec
enc, dec = False, False
# instantiate
f = FooLegacy(1, 'hello')
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.FooLegacy
a: 1
b: hello
"""
# dump io
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == FooLegacy.loads_yaml(y)
# load io
assert f == FooLegacy.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
assert enc
assert dec
# TODO override so that tag is not supported, to check error message
def test_yamlable_default_impl():
""" tests that the default implementation works """
f = Foo_Default(1, 'hello')
s = """!yamlable/yaml.tests.Foo_Default
a: 1
b: hello
"""
assert dump(f, default_flow_style=False) == s
assert dump(load(dump(load(s))), default_flow_style=False) == s
def test_abstract_parent_error():
"""This tests that we can define an abstract parent class with the YamlAble behaviour and inherit it"""
# instantiate
e = FooError(1, 'hello')
# dump
with pytest.raises(NotImplementedError):
e.dumps_yaml()
def test_abstract_parent():
"""This tests that we can define an abstract parent class with the YamlAble behaviour and inherit it"""
# instantiate
    f = FooValid(1, 'hello')
# dump
y = f.dumps_yaml(default_flow_style=False)
assert y == """!yamlable/yaml.tests.FooValid
a: 1
b: hello
"""
# dump io
s = MemorizingStringIO()
f.dump_yaml(s, default_flow_style=False)
assert s.value == y
# dump pyyaml
assert dump(f, default_flow_style=False) == y
# load
assert f == FooValid.loads_yaml(y)
# load io
assert f == FooValid.load_yaml(StringIO(y))
# load pyyaml
assert f == load(y)
| 26.551622 | 115 | 0.56627 |
865a984bc7cd45c042cff94434fa063630359314 | 29,537 | py | Python | src/twisted/web/server.py | vmario/twisted | 34f3d8f8c6f51772eaed92a89257ea011e9a818d | [
"Unlicense",
"MIT"
] | null | null | null | src/twisted/web/server.py | vmario/twisted | 34f3d8f8c6f51772eaed92a89257ea011e9a818d | [
"Unlicense",
"MIT"
] | null | null | null | src/twisted/web/server.py | vmario/twisted | 34f3d8f8c6f51772eaed92a89257ea011e9a818d | [
"Unlicense",
"MIT"
] | null | null | null | # -*- test-case-name: twisted.web.test.test_web -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This is a web server which integrates with the twisted.internet infrastructure.
@var NOT_DONE_YET: A token value which L{twisted.web.resource.IResource.render}
implementations can return to indicate that the application will later call
C{.write} and C{.finish} to complete the request, and that the HTTP
connection should be left open.
@type NOT_DONE_YET: Opaque; do not depend on any particular type for this
value.
"""
import copy
import os
import re
from html import escape
from typing import List, Optional
from urllib.parse import quote as _quote
import zlib
from binascii import hexlify
from zope.interface import implementer
from twisted.python.compat import networkString, nativeString
from twisted.spread.pb import Copyable, ViewPoint
from twisted.internet import address, interfaces
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from twisted.web import iweb, http, util
from twisted.web.http import unquote
from twisted.python import reflect, failure, components
from twisted import copyright
from twisted.web import resource
from twisted.web.error import UnsupportedMethod
from incremental import Version
from twisted.python.deprecate import deprecatedModuleAttribute
from twisted.logger import Logger
NOT_DONE_YET = 1
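# Illustrative sketch (editor's addition, not part of the original module):
# the deferred-rendering pattern described in the docstring above. The class
# name, delay, and response body are assumptions for demonstration only.
class _ExampleDelayedResource(resource.Resource):
    isLeaf = True

    def render_GET(self, request):
        from twisted.internet import reactor  # local import; no import-time effects
        # finish the response later; returning NOT_DONE_YET keeps the
        # connection open until request.finish() is called
        reactor.callLater(1.0, self._finish, request)
        return NOT_DONE_YET

    def _finish(self, request):
        request.write(b"done")
        request.finish()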
__all__ = [
"supportedMethods",
"Request",
"Session",
"Site",
"version",
"NOT_DONE_YET",
"GzipEncoderFactory",
]
# backwards compatibility
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.datetimeToString instead",
"twisted.web.server",
"date_time_string",
)
deprecatedModuleAttribute(
Version("Twisted", 12, 1, 0),
"Please use twisted.web.http.stringToDatetime instead",
"twisted.web.server",
"string_date_time",
)
date_time_string = http.datetimeToString
string_date_time = http.stringToDatetime
# Support for other methods may be implemented on a per-resource basis.
supportedMethods = (b"GET", b"HEAD", b"POST")
class _RemoteProducerWrapper:
def __init__(self, remote):
self.resumeProducing = remote.remoteMethod("resumeProducing")
self.pauseProducing = remote.remoteMethod("pauseProducing")
self.stopProducing = remote.remoteMethod("stopProducing")
version = networkString(f"TwistedWeb/{copyright.version}")
| 32.601545 | 88 | 0.593561 |
865ad1612ffd549ba548de0cbe66876028b4a804 | 321 | py | Python | pycoin/symbols/doge.py | jaschadub/pycoin | 1e8d0d9fe20ce0347b97847bb529cd1bd84c7442 | [
"MIT"
] | 1 | 2018-01-17T05:08:32.000Z | 2018-01-17T05:08:32.000Z | pycoin/symbols/doge.py | impactog/pycoin | 3db6f82afa3054d8d07caca4909e1aed3de2fceb | [
"MIT"
] | null | null | null | pycoin/symbols/doge.py | impactog/pycoin | 3db6f82afa3054d8d07caca4909e1aed3de2fceb | [
"MIT"
] | 1 | 2020-03-10T12:21:51.000Z | 2020-03-10T12:21:51.000Z | from pycoin.networks.bitcoinish import create_bitcoinish_network
network = create_bitcoinish_network(
symbol="DOGE", network_name="Dogecoin", subnet_name="mainnet",
wif_prefix_hex="9e", address_prefix_hex="1e", pay_to_script_prefix_hex="16",
bip32_prv_prefix_hex="02fd3955", bip32_pub_prefix_hex="02fd3929")
| 45.857143 | 80 | 0.800623 |
865af347d7d59f9bd67eb9dbfa07a221fbd308e5 | 554 | py | Python | Pset/hamming_numbers.py | MarkHershey/python-learning | 8d6c87941af6db5878b59483526ed402f4b319b3 | [
"MIT"
] | 9 | 2020-06-05T17:01:23.000Z | 2022-03-16T19:55:50.000Z | Pset/hamming_numbers.py | MarkHershey/python-learning | 8d6c87941af6db5878b59483526ed402f4b319b3 | [
"MIT"
] | null | null | null | Pset/hamming_numbers.py | MarkHershey/python-learning | 8d6c87941af6db5878b59483526ed402f4b319b3 | [
"MIT"
] | null | null | null | def hamming(n):
"""Returns the nth hamming number"""
hamming = {1}
x = 1
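    # Editor's note (not in the original): the 3.5 below appears to be a
    # heuristic oversampling factor -- grow the set well past n entries so the
    # n-th smallest Hamming number is present before sorting and indexing.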
while len(hamming) <= n * 3.5:
new_hamming = {1}
for i in hamming:
new_hamming.add(i * 2)
new_hamming.add(i * 3)
new_hamming.add(i * 5)
# merge new number into hamming set
hamming = hamming.union(new_hamming)
hamming = sorted(list(hamming))
return hamming[n - 1]
print(hamming(970))
# hamming(968) should be 41943040
# hamming(969) should be 41990400
# hamming(970) should be 42187500
| 24.086957 | 44 | 0.592058 |
865b44ebd78e20ddd28ec532ea20204eaa6a07dc | 848 | py | Python | examples/run_merger.py | needlehaystack/needlestack | e00529a2a7c2d85059936a85f54dfb55e515b6ef | [
"Apache-2.0"
] | 3 | 2019-10-03T22:15:21.000Z | 2022-02-08T09:05:41.000Z | examples/run_merger.py | cungtv/needlestack | e00529a2a7c2d85059936a85f54dfb55e515b6ef | [
"Apache-2.0"
] | 1 | 2021-04-30T21:08:47.000Z | 2021-04-30T21:08:47.000Z | examples/run_merger.py | cungtv/needlestack | e00529a2a7c2d85059936a85f54dfb55e515b6ef | [
"Apache-2.0"
] | 2 | 2019-08-02T19:13:09.000Z | 2019-10-25T01:47:17.000Z | import logging
from grpc_health.v1 import health_pb2, health_pb2_grpc
from grpc_health.v1.health import HealthServicer
from needlestack.apis import servicers_pb2_grpc
from needlestack.servicers import factory
from needlestack.servicers.merger import MergerServicer
from examples import configs
logging.getLogger("kazoo").setLevel("WARN")
if __name__ == "__main__":
main()
| 25.69697 | 92 | 0.792453 |
865b48e5b6d60c2c5b81fb4b0a827e80f5502ece | 4,482 | py | Python | engine_wrapper.py | lidevelopers/Lishogi-Bot-1 | 5e669870930fe497e323324f36ccdbf5b04d26d3 | [
"MIT"
] | null | null | null | engine_wrapper.py | lidevelopers/Lishogi-Bot-1 | 5e669870930fe497e323324f36ccdbf5b04d26d3 | [
"MIT"
] | 2 | 2021-06-28T11:09:19.000Z | 2021-06-30T16:59:13.000Z | engine_wrapper.py | lidevelopers/Lishogi-Bot-1 | 5e669870930fe497e323324f36ccdbf5b04d26d3 | [
"MIT"
] | 9 | 2021-06-28T08:06:08.000Z | 2021-10-06T05:01:57.000Z | import os
import shogi
import backoff
import subprocess
from util import *
import logging
logger = logging.getLogger(__name__)
import engine_ctrl
| 29.486842 | 109 | 0.566934 |
865bbf72a785e72699020e27186c8a54194bf255 | 1,615 | py | Python | examples/python/test_as2.py | sloriot/cgal-swig-bindings | c9c5afdf64fa0c52f9c3785173159167ab2b3163 | [
"BSL-1.0"
] | null | null | null | examples/python/test_as2.py | sloriot/cgal-swig-bindings | c9c5afdf64fa0c52f9c3785173159167ab2b3163 | [
"BSL-1.0"
] | null | null | null | examples/python/test_as2.py | sloriot/cgal-swig-bindings | c9c5afdf64fa0c52f9c3785173159167ab2b3163 | [
"BSL-1.0"
] | null | null | null | from CGAL.CGAL_Kernel import Point_2
from CGAL.CGAL_Kernel import Weighted_point_2
from CGAL.CGAL_Alpha_shape_2 import Alpha_shape_2
from CGAL.CGAL_Alpha_shape_2 import Weighted_alpha_shape_2
from CGAL.CGAL_Alpha_shape_2 import Weighted_alpha_shape_2_Face_handle
from CGAL.CGAL_Alpha_shape_2 import GENERAL, EXTERIOR, SINGULAR, REGULAR, INTERIOR
from CGAL.CGAL_Alpha_shape_2 import Alpha_shape_2_Vertex_handle
from CGAL.CGAL_Alpha_shape_2 import Alpha_shape_2_Face_handle
from CGAL.CGAL_Alpha_shape_2 import Face_Interval_3
lst = []
lst.append(Point_2(0, 0))
lst.append(Point_2(0, 4))
lst.append(Point_2(44, 0))
lst.append(Point_2(44, 5))
lst.append(Point_2(444, 51))
lst.append(Point_2(14, 1))
t = Alpha_shape_2(lst, 0, GENERAL)
t2 = Alpha_shape_2(lst, 0)
t.clear()
t.make_alpha_shape(lst)
for d in t.alpha():
print(d)
for v in t.finite_vertices():
type = t.classify(v)
print(v.get_range()[0])
if type == INTERIOR:
print("INTERIOR")
elif type == SINGULAR:
print("SINGULAR")
elif type == REGULAR:
print("REGULAR")
elif type == EXTERIOR:
print("EXTERIOR")
for f in t.finite_faces():
i = f.get_ranges(0)
print(i.first)
print(i.second)
print(i.third)
was = Weighted_alpha_shape_2()
lst_wp = []
lst_wp.append(Weighted_point_2(Point_2(0, 0), 1))
lst_wp.append(Weighted_point_2(Point_2(0, 4), 1))
lst_wp.append(Weighted_point_2(Point_2(44, 0), 1))
lst_wp.append(Weighted_point_2(Point_2(44, 5), 1))
lst_wp.append(Weighted_point_2(Point_2(444, 51), 1))
lst_wp.append(Weighted_point_2(Point_2(14, 1), 1))
was.make_alpha_shape(lst_wp)
| 26.47541 | 82 | 0.740557 |
865c3c1cc62a44e9c288bd3980673e8cb1a26d5e | 93 | py | Python | connections/mode.py | pavithra-mahamani/TAF | ff854adcc6ca3e50d9dc64e7756ca690251128d3 | [
"Apache-2.0"
] | null | null | null | connections/mode.py | pavithra-mahamani/TAF | ff854adcc6ca3e50d9dc64e7756ca690251128d3 | [
"Apache-2.0"
] | null | null | null | connections/mode.py | pavithra-mahamani/TAF | ff854adcc6ca3e50d9dc64e7756ca690251128d3 | [
"Apache-2.0"
] | null | null | null | '''
Created on Jan 18, 2018
@author: riteshagarwal
'''
java = False
rest = False
cli = False | 11.625 | 23 | 0.677419 |
865e9017c35669feb5f2b679820ab813bc9d8b73 | 533 | py | Python | scene_action2.py | encela95dus/ios_pythonista_examples | e136cdcb05126f0f9b9f6fb6365870876b419619 | [
"MIT"
] | 36 | 2019-01-12T04:17:49.000Z | 2022-03-31T05:33:29.000Z | scene_action2.py | Backup-eric645/ios_pythonista_examples | e136cdcb05126f0f9b9f6fb6365870876b419619 | [
"MIT"
] | null | null | null | scene_action2.py | Backup-eric645/ios_pythonista_examples | e136cdcb05126f0f9b9f6fb6365870876b419619 | [
"MIT"
] | 15 | 2018-12-30T21:18:05.000Z | 2022-01-30T13:17:07.000Z | import scene
scene.run(MyScene())
| 25.380952 | 52 | 0.525328 |
865e9adf437f79d36a8db146c26aeeb0ca4e34fa | 672 | py | Python | bot/commands/disconnect.py | aq1/vkPostman | db6b8d387d484ff53d12dcaf77ba3dcaa6da3822 | [
"MIT"
] | 1 | 2020-09-14T04:47:31.000Z | 2020-09-14T04:47:31.000Z | bot/commands/disconnect.py | aq1/vkPostman | db6b8d387d484ff53d12dcaf77ba3dcaa6da3822 | [
"MIT"
] | null | null | null | bot/commands/disconnect.py | aq1/vkPostman | db6b8d387d484ff53d12dcaf77ba3dcaa6da3822 | [
"MIT"
] | null | null | null | from bot.commands import BaseCommand
import mongo
| 25.846154 | 66 | 0.633929 |
865f59e775e337c6b42c37791b8b1b83a1c4fa34 | 2,522 | py | Python | pysh/bash_vm/shell_command.py | JordanKoeller/Pysch | 6775db00e6d551328ce49a50a5987223a9e9a9c3 | [
"MIT"
] | null | null | null | pysh/bash_vm/shell_command.py | JordanKoeller/Pysch | 6775db00e6d551328ce49a50a5987223a9e9a9c3 | [
"MIT"
] | null | null | null | pysh/bash_vm/shell_command.py | JordanKoeller/Pysch | 6775db00e6d551328ce49a50a5987223a9e9a9c3 | [
"MIT"
] | null | null | null | from __future__ import annotations
import subprocess
import os
from typing import List, Dict, Iterator, Optional, Tuple
def __iter__(self) -> Iterator[str]:
return iter(self._split_tokens())
def __str__(self) -> str:
return f'<STDOUT value={self.value} code={self.code} >'
def _split_tokens(self) -> List[str]:
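        # Editor's note (not in the original): splits self.value on whitespace
        # while keeping quoted substrings intact, e.g.
        #     'a "b c" d'  ->  ['a', 'b c', 'd']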
ret = []
in_quotes = None
accumulator: List[str] = []
for char in self.value:
if _whitespace(char) and not in_quotes and accumulator:
ret.append(''.join(accumulator))
accumulator = []
elif in_quotes == None and _quotes(char):
in_quotes = char
elif in_quotes and in_quotes == char:
in_quotes = None
if accumulator:
ret.append(''.join(accumulator))
accumulator = []
elif in_quotes and _quotes(char):
raise ValueError(
f"Found unmatched quote characters in string {self.value}")
else:
accumulator.append(char)
return ret
def _quotes(c: str) -> bool:
return c in ['"', "'"]
def _whitespace(c: str) -> bool:
return str.isspace(c)
| 28.022222 | 81 | 0.527756 |
865fa048751d6ad0bc743581cad5200b3338324d | 2,192 | py | Python | indico/web/forms/fields/protection.py | jgrigera/indico | b5538f2755bc38a02313d079bac831ee3dfb44ab | [
"MIT"
] | 1 | 2018-11-12T21:29:26.000Z | 2018-11-12T21:29:26.000Z | indico/web/forms/fields/protection.py | jgrigera/indico | b5538f2755bc38a02313d079bac831ee3dfb44ab | [
"MIT"
] | 9 | 2020-09-08T09:25:57.000Z | 2022-01-13T02:59:05.000Z | indico/web/forms/fields/protection.py | jgrigera/indico | b5538f2755bc38a02313d079bac831ee3dfb44ab | [
"MIT"
] | 3 | 2020-07-20T09:09:44.000Z | 2020-10-19T00:29:49.000Z | # This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import absolute_import, unicode_literals
from flask import render_template
from markupsafe import Markup
from indico.core.db import db
from indico.core.db.sqlalchemy.protection import ProtectionMode
from indico.util.i18n import _
from indico.web.forms.fields import IndicoEnumRadioField
from indico.web.forms.widgets import JinjaWidget
| 45.666667 | 111 | 0.734945 |
865fae0cf0882393868b033ff9b36122ab7504f2 | 76,601 | py | Python | src/saml2/saml.py | masterapps-au/pysaml2 | 97ad6c066c93cb31a3c3b9d504877c02e93ca9a9 | [
"Apache-2.0"
] | null | null | null | src/saml2/saml.py | masterapps-au/pysaml2 | 97ad6c066c93cb31a3c3b9d504877c02e93ca9a9 | [
"Apache-2.0"
] | null | null | null | src/saml2/saml.py | masterapps-au/pysaml2 | 97ad6c066c93cb31a3c3b9d504877c02e93ca9a9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Generated Mon May 2 14:23:33 2011 by parse_xsd.py version 0.4.
#
# A summary of available specifications can be found at:
# https://wiki.oasis-open.org/security/FrontPage
#
# saml core specifications to be found at:
# if any question arise please query the following pdf.
# http://docs.oasis-open.org/security/saml/v2.0/saml-core-2.0-os.pdf
# The specification was later updated with errata, and the new version is here:
# https://www.oasis-open.org/committees/download.php/56776/sstc-saml-core-errata-2.0-wd-07.pdf
#
try:
from base64 import encodebytes as b64encode
except ImportError:
from base64 import b64encode
from saml2.validate import valid_ipv4, MustValueError
from saml2.validate import valid_ipv6
from saml2.validate import ShouldValueError
from saml2.validate import valid_domain_name
import saml2
from saml2 import SamlBase
import six
from saml2 import xmldsig as ds
from saml2 import xmlenc as xenc
# authentication information fields
NAMESPACE = 'urn:oasis:names:tc:SAML:2.0:assertion'
# xmlschema definition
XSD = "xs"
# xmlschema templates and extensions
XS_NAMESPACE = 'http://www.w3.org/2001/XMLSchema'
# xmlschema-instance, which contains several builtin attributes
XSI_NAMESPACE = 'http://www.w3.org/2001/XMLSchema-instance'
# xml soap namespace
NS_SOAP_ENC = "http://schemas.xmlsoap.org/soap/encoding/"
# type definitions for xmlschemas
XSI_TYPE = '{%s}type' % XSI_NAMESPACE
# nil type definition for xmlschemas
XSI_NIL = '{%s}nil' % XSI_NAMESPACE
# IdP and SP usually communicate about a subject (NameID);
# the format determines the category the subject belongs to
# custom subject
NAMEID_FORMAT_UNSPECIFIED = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified")
# subject as email address
NAMEID_FORMAT_EMAILADDRESS = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress")
# subject as x509 key
NAMEID_FORMAT_X509SUBJECTNAME = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:X509SubjectName")
# subject as windows domain name
NAMEID_FORMAT_WINDOWSDOMAINQUALIFIEDNAME = (
"urn:oasis:names:tc:SAML:1.1:nameid-format:WindowsDomainQualifiedName")
# subject from a kerberos instance
NAMEID_FORMAT_KERBEROS = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:kerberos")
# subject as name
NAMEID_FORMAT_ENTITY = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:entity")
# linked subject
NAMEID_FORMAT_PERSISTENT = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:persistent")
# anonymous subject
NAMEID_FORMAT_TRANSIENT = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:transient")
# subject available in encrypted format
NAMEID_FORMAT_ENCRYPTED = (
"urn:oasis:names:tc:SAML:2.0:nameid-format:encrypted")
# dict of available formats
NAMEID_FORMATS_SAML2 = (
('NAMEID_FORMAT_EMAILADDRESS', NAMEID_FORMAT_EMAILADDRESS),
('NAMEID_FORMAT_ENCRYPTED', NAMEID_FORMAT_ENCRYPTED),
('NAMEID_FORMAT_ENTITY', NAMEID_FORMAT_ENTITY),
('NAMEID_FORMAT_PERSISTENT', NAMEID_FORMAT_PERSISTENT),
('NAMEID_FORMAT_TRANSIENT', NAMEID_FORMAT_TRANSIENT),
('NAMEID_FORMAT_UNSPECIFIED', NAMEID_FORMAT_UNSPECIFIED),
)
# a profile outlines a set of rules describing how to embed SAML assertions.
# https://docs.oasis-open.org/security/saml/v2.0/saml-profiles-2.0-os.pdf
# The specification was later updated with errata, and the new version is here:
# https://www.oasis-open.org/committees/download.php/56782/sstc-saml-profiles-errata-2.0-wd-07.pdf
# XML based values for SAML attributes
PROFILE_ATTRIBUTE_BASIC = (
"urn:oasis:names:tc:SAML:2.0:profiles:attribute:basic")
# an AuthnRequest is made to initiate authentication
# authenticate the request with login credentials
AUTHN_PASSWORD = "urn:oasis:names:tc:SAML:2.0:ac:classes:Password"
# authenticate the request with login credentials, over tls/https
AUTHN_PASSWORD_PROTECTED = \
"urn:oasis:names:tc:SAML:2.0:ac:classes:PasswordProtectedTransport"
# attribute statements is key:value metadata shared with your app
# custom format
NAME_FORMAT_UNSPECIFIED = (
"urn:oasis:names:tc:SAML:2.0:attrname-format:unspecified")
# uri format
NAME_FORMAT_URI = "urn:oasis:names:tc:SAML:2.0:attrname-format:uri"
# XML-based format
NAME_FORMAT_BASIC = "urn:oasis:names:tc:SAML:2.0:attrname-format:basic"
# dict of available formats
NAME_FORMATS_SAML2 = (
('NAME_FORMAT_BASIC', NAME_FORMAT_BASIC),
('NAME_FORMAT_URI', NAME_FORMAT_URI),
('NAME_FORMAT_UNSPECIFIED', NAME_FORMAT_UNSPECIFIED),
)
# the SAML authority's decision can be predetermined by arbitrary context
# the specified action is permitted
DECISION_TYPE_PERMIT = "Permit"
# the specified action is denied
DECISION_TYPE_DENY = "Deny"
# the SAML authority cannot determine if the action is permitted or denied
DECISION_TYPE_INDETERMINATE = "Indeterminate"
# consent attributes determine whether consent has been given and under
# what conditions
# no claim to consent is made
CONSENT_UNSPECIFIED = "urn:oasis:names:tc:SAML:2.0:consent:unspecified"
# consent has been obtained
CONSENT_OBTAINED = "urn:oasis:names:tc:SAML:2.0:consent:obtained"
# consent has been obtained before the message has been initiated
CONSENT_PRIOR = "urn:oasis:names:tc:SAML:2.0:consent:prior"
# consent has been obtained implicitly
CONSENT_IMPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-implicit"
# consent has been obtained explicitly
CONSENT_EXPLICIT = "urn:oasis:names:tc:SAML:2.0:consent:current-explicit"
# no consent has been obtained
CONSENT_UNAVAILABLE = "urn:oasis:names:tc:SAML:2.0:consent:unavailable"
# no consent is needed.
CONSENT_INAPPLICABLE = "urn:oasis:names:tc:SAML:2.0:consent:inapplicable"
# Subject confirmation methods (SCM): confirmation can be issued by third
# parties, not only by the subject itself.
# http://docs.oasis-open.org/wss/oasis-wss-saml-token-profile-1.0.pdf
# the third party is identified on behalf of the subject by a private/public key pair
SCM_HOLDER_OF_KEY = "urn:oasis:names:tc:SAML:2.0:cm:holder-of-key"
# the third party is identified by subject confirmation and must include a
# security header signing its content.
SCM_SENDER_VOUCHES = "urn:oasis:names:tc:SAML:2.0:cm:sender-vouches"
# a bearer token is issued instead.
SCM_BEARER = "urn:oasis:names:tc:SAML:2.0:cm:bearer"
def name_id_type__from_string(xml_string):
return saml2.create_class_from_xml_string(NameIDType_, xml_string)
def audience_from_string(xml_string):
return saml2.create_class_from_xml_string(Audience, xml_string)
def subject_locality_type__from_string(xml_string):
return saml2.create_class_from_xml_string(SubjectLocalityType_, xml_string)
#..................
# ['AuthzDecisionStatement', 'EvidenceType', 'AdviceType', 'Evidence',
# 'Assertion', 'AssertionType', 'AuthzDecisionStatementType', 'Advice']
#..................
# ['Assertion', 'AssertionType', 'AdviceType', 'Advice']
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
EvidenceType_.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
Evidence.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Assertion'] = (
'assertion', [Assertion])
AssertionType_.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Advice'] = (
'advice', Advice)
Assertion.c_children['{urn:oasis:names:tc:SAML:2.0:assertion}Advice'] = (
'advice', Advice)
# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AG_IDNameQualifiers = [
('NameQualifier', 'string', False),
('SPNameQualifier', 'string', False),
]
ELEMENT_FROM_STRING = {
BaseID.c_tag: base_id_from_string,
NameID.c_tag: name_id_from_string,
NameIDType_.c_tag: name_id_type__from_string,
EncryptedElementType_.c_tag: encrypted_element_type__from_string,
EncryptedID.c_tag: encrypted_id_from_string,
Issuer.c_tag: issuer_from_string,
AssertionIDRef.c_tag: assertion_id_ref_from_string,
AssertionURIRef.c_tag: assertion_uri_ref_from_string,
Assertion.c_tag: assertion_from_string,
AssertionType_.c_tag: assertion_type__from_string,
Subject.c_tag: subject_from_string,
SubjectType_.c_tag: subject_type__from_string,
SubjectConfirmation.c_tag: subject_confirmation_from_string,
SubjectConfirmationType_.c_tag: subject_confirmation_type__from_string,
SubjectConfirmationData.c_tag: subject_confirmation_data_from_string,
SubjectConfirmationDataType_.c_tag:
subject_confirmation_data_type__from_string,
KeyInfoConfirmationDataType_.c_tag:
key_info_confirmation_data_type__from_string,
Conditions.c_tag: conditions_from_string,
ConditionsType_.c_tag: conditions_type__from_string,
Condition.c_tag: condition_from_string,
AudienceRestriction.c_tag: audience_restriction_from_string,
AudienceRestrictionType_.c_tag: audience_restriction_type__from_string,
Audience.c_tag: audience_from_string,
OneTimeUse.c_tag: one_time_use_from_string,
OneTimeUseType_.c_tag: one_time_use_type__from_string,
ProxyRestriction.c_tag: proxy_restriction_from_string,
ProxyRestrictionType_.c_tag: proxy_restriction_type__from_string,
Advice.c_tag: advice_from_string,
AdviceType_.c_tag: advice_type__from_string,
EncryptedAssertion.c_tag: encrypted_assertion_from_string,
Statement.c_tag: statement_from_string,
AuthnStatement.c_tag: authn_statement_from_string,
AuthnStatementType_.c_tag: authn_statement_type__from_string,
SubjectLocality.c_tag: subject_locality_from_string,
SubjectLocalityType_.c_tag: subject_locality_type__from_string,
AuthnContext.c_tag: authn_context_from_string,
AuthnContextType_.c_tag: authn_context_type__from_string,
AuthnContextClassRef.c_tag: authn_context_class_ref_from_string,
AuthnContextDeclRef.c_tag: authn_context_decl_ref_from_string,
AuthnContextDecl.c_tag: authn_context_decl_from_string,
AuthenticatingAuthority.c_tag: authenticating_authority_from_string,
AuthzDecisionStatement.c_tag: authz_decision_statement_from_string,
AuthzDecisionStatementType_.c_tag:
authz_decision_statement_type__from_string,
DecisionType_.c_tag: decision_type__from_string,
Action.c_tag: action_from_string,
ActionType_.c_tag: action_type__from_string,
Evidence.c_tag: evidence_from_string,
EvidenceType_.c_tag: evidence_type__from_string,
AttributeStatement.c_tag: attribute_statement_from_string,
AttributeStatementType_.c_tag: attribute_statement_type__from_string,
Attribute.c_tag: attribute_from_string,
AttributeType_.c_tag: attribute_type__from_string,
AttributeValue.c_tag: attribute_value_from_string,
EncryptedAttribute.c_tag: encrypted_attribute_from_string,
}
ELEMENT_BY_TAG = {
'BaseID': BaseID,
'NameID': NameID,
'NameIDType': NameIDType_,
'EncryptedElementType': EncryptedElementType_,
'EncryptedID': EncryptedID,
'Issuer': Issuer,
'AssertionIDRef': AssertionIDRef,
'AssertionURIRef': AssertionURIRef,
'Assertion': Assertion,
'AssertionType': AssertionType_,
'Subject': Subject,
'SubjectType': SubjectType_,
'SubjectConfirmation': SubjectConfirmation,
'SubjectConfirmationType': SubjectConfirmationType_,
'SubjectConfirmationData': SubjectConfirmationData,
'SubjectConfirmationDataType': SubjectConfirmationDataType_,
'KeyInfoConfirmationDataType': KeyInfoConfirmationDataType_,
'Conditions': Conditions,
'ConditionsType': ConditionsType_,
'Condition': Condition,
'AudienceRestriction': AudienceRestriction,
'AudienceRestrictionType': AudienceRestrictionType_,
'Audience': Audience,
'OneTimeUse': OneTimeUse,
'OneTimeUseType': OneTimeUseType_,
'ProxyRestriction': ProxyRestriction,
'ProxyRestrictionType': ProxyRestrictionType_,
'Advice': Advice,
'AdviceType': AdviceType_,
'EncryptedAssertion': EncryptedAssertion,
'Statement': Statement,
'AuthnStatement': AuthnStatement,
'AuthnStatementType': AuthnStatementType_,
'SubjectLocality': SubjectLocality,
'SubjectLocalityType': SubjectLocalityType_,
'AuthnContext': AuthnContext,
'AuthnContextType': AuthnContextType_,
'AuthnContextClassRef': AuthnContextClassRef,
'AuthnContextDeclRef': AuthnContextDeclRef,
'AuthnContextDecl': AuthnContextDecl,
'AuthenticatingAuthority': AuthenticatingAuthority,
'AuthzDecisionStatement': AuthzDecisionStatement,
'AuthzDecisionStatementType': AuthzDecisionStatementType_,
'DecisionType': DecisionType_,
'Action': Action,
'ActionType': ActionType_,
'Evidence': Evidence,
'EvidenceType': EvidenceType_,
'AttributeStatement': AttributeStatement,
'AttributeStatementType': AttributeStatementType_,
'Attribute': Attribute,
'AttributeType': AttributeType_,
'AttributeValue': AttributeValue,
'EncryptedAttribute': EncryptedAttribute,
'BaseIDAbstractType': BaseIDAbstractType_,
'ConditionAbstractType': ConditionAbstractType_,
'StatementAbstractType': StatementAbstractType_,
}
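# Illustrative usage (editor's addition, not in the original file): build a
# transient NameID using one of the format constants defined near the top of
# this module. `to_string()` is inherited from SamlBase; the exact XML
# formatting may vary between pysaml2 versions.
def _example_name_id():
    nid = NameID(format=NAMEID_FORMAT_TRANSIENT, text="_abc123")
    return nid.to_string()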
| 38.185942 | 98 | 0.6752 |
866017a177effed366d9a7810ad090cd23a963da | 1,163 | py | Python | ROS_packages/custom_ROS_envs/turtlebot2_maze_env/src/turtlebot2_maze_random.py | PierreExeter/custom_gym_envs | 2b6a1c16a4198c8d9fa64f10fe09a041826ac81a | [
"MIT"
] | 1 | 2020-09-25T01:51:58.000Z | 2020-09-25T01:51:58.000Z | ROS_packages/custom_ROS_envs/turtlebot2_maze_env/src/turtlebot2_maze_random.py | PierreExeter/custom_gym_envs | 2b6a1c16a4198c8d9fa64f10fe09a041826ac81a | [
"MIT"
] | null | null | null | ROS_packages/custom_ROS_envs/turtlebot2_maze_env/src/turtlebot2_maze_random.py | PierreExeter/custom_gym_envs | 2b6a1c16a4198c8d9fa64f10fe09a041826ac81a | [
"MIT"
] | 1 | 2021-07-16T02:55:59.000Z | 2021-07-16T02:55:59.000Z | #!/usr/bin/env python
import gym
import rospy
from openai_ros.openai_ros_common import StartOpenAI_ROS_Environment
# initialise environment
rospy.init_node('turtlebot2_maze_random', anonymous=True, log_level=rospy.WARN)
task_and_robot_environment_name = rospy.get_param('/turtlebot2/task_and_robot_environment_name')
env = StartOpenAI_ROS_Environment(task_and_robot_environment_name)
print("Environment: ", env)
print("Action space: ", env.action_space)
# print(env.action_space.high)
# print(env.action_space.low)
print("Observation space: ", env.observation_space)
print(env.observation_space.high)
print(env.observation_space.low)
for episode in range(20):
env.reset()
for t in range(100):
action = env.action_space.sample()
obs, reward, done, info = env.step(action)
print("episode: ", episode)
print("timestep: ", t)
print("obs: ", obs)
print("action:", action)
print("reward: ", reward)
print("done: ", done)
print("info: ", info)
if done:
print("Episode {} finished after {} timesteps".format(episode, t+1))
break
env.close() | 27.690476 | 96 | 0.687016 |
866094a72b6fdcd5bf322c232acd28e290c2c5aa | 3,096 | py | Python | solver.py | jacobchh/Sudoku-Solver | 946a954e8eda234760872c55fcd2354dc0a8a4f9 | [
"Apache-2.0"
] | 1 | 2020-08-04T05:11:05.000Z | 2020-08-04T05:11:05.000Z | solver.py | jacobchh/Sudoku-Solver | 946a954e8eda234760872c55fcd2354dc0a8a4f9 | [
"Apache-2.0"
] | null | null | null | solver.py | jacobchh/Sudoku-Solver | 946a954e8eda234760872c55fcd2354dc0a8a4f9 | [
"Apache-2.0"
] | null | null | null | import numpy as np
board = np.zeros(shape=(9, 9))
count = 0
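# Editor's sketch: the solver functions were not preserved in this dump. The
# standard backtracking solver below (names `possible`, `solve`, `main` are
# assumptions, not the author's code) makes the __main__ guard runnable.

def possible(y, x, n):
    """Return True if digit n may be placed at row y, column x."""
    if n in board[y, :] or n in board[:, x]:
        return False
    y0, x0 = 3 * (y // 3), 3 * (x // 3)
    return n not in board[y0:y0 + 3, x0:x0 + 3]

def solve():
    """Fill the first empty cell, trying digits 1-9, backtracking on failure."""
    global count
    for y in range(9):
        for x in range(9):
            if board[y, x] == 0:
                for n in range(1, 10):
                    if possible(y, x, n):
                        board[y, x] = n
                        if solve():
                            return True
                        board[y, x] = 0
                count += 1  # one more dead end reached while backtracking
                return False
    return True  # no empty cells remain: the board is solved

def main():
    if solve():
        print(board)
    else:
        print("No solution found")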
if __name__ == "__main__":
main()
| 23.633588 | 105 | 0.440891 |
86609708c6740fc5dcff69f746034012abb3d227 | 1,112 | py | Python | 01_basics/01_building_expressions/02_vector_mat_soln.py | johny-c/theano_exercises | 7fd43315bf7c475a6f218091316c0bd34e0688c4 | [
"BSD-3-Clause"
] | 711 | 2015-01-10T05:39:21.000Z | 2022-03-15T23:45:45.000Z | 01_basics/01_building_expressions/02_vector_mat_soln.py | rsingh2083/theano_exercises | 7fd43315bf7c475a6f218091316c0bd34e0688c4 | [
"BSD-3-Clause"
] | 2 | 2016-06-13T06:46:58.000Z | 2017-04-14T08:21:20.000Z | 01_basics/01_building_expressions/02_vector_mat_soln.py | rsingh2083/theano_exercises | 7fd43315bf7c475a6f218091316c0bd34e0688c4 | [
"BSD-3-Clause"
] | 371 | 2015-01-16T01:31:41.000Z | 2022-03-15T11:37:30.000Z | import numpy as np
from theano import function
import theano.tensor as T
def make_vector():
"""
Returns a new Theano vector.
"""
return T.vector()
def make_matrix():
"""
Returns a new Theano matrix.
"""
return T.matrix()
def elemwise_mul(a, b):
"""
a: A theano matrix
b: A theano matrix
Returns the elementwise product of a and b
"""
return a * b
def matrix_vector_mul(a, b):
"""
a: A theano matrix
b: A theano vector
Returns the matrix-vector product of a and b
"""
return T.dot(a, b)
if __name__ == "__main__":
a = make_vector()
b = make_vector()
c = elemwise_mul(a, b)
d = make_matrix()
e = matrix_vector_mul(d, c)
f = function([a, b, d], e)
rng = np.random.RandomState([1, 2, 3])
a_value = rng.randn(5).astype(a.dtype)
b_value = rng.rand(5).astype(b.dtype)
c_value = a_value * b_value
d_value = rng.randn(5, 5).astype(d.dtype)
expected = np.dot(d_value, c_value)
actual = f(a_value, b_value, d_value)
assert np.allclose(actual, expected)
print "SUCCESS!"
| 19.508772 | 48 | 0.607014 |
8660a9342ead6210c470087662e4e506c3d6349b | 2,863 | py | Python | nova/api/openstack/compute/used_limits.py | bopopescu/nova-8 | 768d7cc0a632e1a880f00c5840c1ec8051e161be | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/used_limits.py | bopopescu/nova-8 | 768d7cc0a632e1a880f00c5840c1ec8051e161be | [
"Apache-2.0"
] | null | null | null | nova/api/openstack/compute/used_limits.py | bopopescu/nova-8 | 768d7cc0a632e1a880f00c5840c1ec8051e161be | [
"Apache-2.0"
] | 1 | 2020-07-22T21:09:15.000Z | 2020-07-22T21:09:15.000Z | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import api_version_request
from nova.api.openstack.api_version_request \
import MIN_WITHOUT_PROXY_API_SUPPORT_VERSION
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.policies import used_limits as ul_policies
from nova import quota
QUOTAS = quota.QUOTAS
| 35.7875 | 78 | 0.625218 |
86612e58c1d3c9004b21a40197263a8e6dc182a5 | 7,362 | py | Python | tf_agents/bandits/agents/examples/v2/trainer.py | howards11/agents | 8d5627d9b9c3680468a63564c25a4d82fa1befb0 | [
"Apache-2.0"
] | 3,175 | 2017-09-08T18:28:32.000Z | 2022-03-31T01:32:22.000Z | tf_agents/bandits/agents/examples/v2/trainer.py | MFosset/agents | 756f7bdf493986c25eb585438134f1dbb8045b1b | [
"Apache-2.0"
] | 703 | 2017-09-18T05:51:57.000Z | 2022-03-31T17:37:50.000Z | tf_agents/bandits/agents/examples/v2/trainer.py | MFosset/agents | 756f7bdf493986c25eb585438134f1dbb8045b1b | [
"Apache-2.0"
] | 844 | 2017-09-08T23:28:57.000Z | 2022-03-30T09:29:32.000Z | # coding=utf-8
# Copyright 2020 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generic TF-Agents training function for bandits."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import logging
import tensorflow as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.drivers import dynamic_step_driver
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.policies import policy_saver
from tf_agents.replay_buffers import tf_uniform_replay_buffer
tf = tf.compat.v2
AGENT_CHECKPOINT_NAME = 'agent'
STEP_CHECKPOINT_NAME = 'step'
CHECKPOINT_FILE_PREFIX = 'ckpt'
def get_replay_buffer(data_spec,
batch_size,
steps_per_loop):
"""Return a `TFUniformReplayBuffer` for the given `agent`."""
buf = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=data_spec,
batch_size=batch_size,
max_length=steps_per_loop)
return buf
def get_training_loop_fn(driver, replay_buffer, agent, steps):
"""Returns a `tf.function` that runs the driver and training loops.
Args:
driver: an instance of `Driver`.
replay_buffer: an instance of `ReplayBuffer`.
agent: an instance of `TFAgent`.
steps: an integer indicating how many driver steps should be
executed and presented to the trainer during each training loop.
"""
def training_loop():
"""Returns a `tf.function` that runs the training loop."""
driver.run()
batch_size = driver.env.batch_size
dataset = replay_buffer.as_dataset(
sample_batch_size=batch_size,
num_steps=steps,
single_deterministic_pass=True)
experience, unused_info = tf.data.experimental.get_single_element(dataset)
set_expected_shape(experience, steps)
loss_info = agent.train(experience)
replay_buffer.clear()
return loss_info
return training_loop
def restore_and_get_checkpoint_manager(root_dir, agent, metrics, step_metric):
"""Restores from `root_dir` and returns a function that writes checkpoints."""
trackable_objects = {metric.name: metric for metric in metrics}
trackable_objects[AGENT_CHECKPOINT_NAME] = agent
trackable_objects[STEP_CHECKPOINT_NAME] = step_metric
checkpoint = tf.train.Checkpoint(**trackable_objects)
checkpoint_manager = tf.train.CheckpointManager(checkpoint=checkpoint,
directory=root_dir,
max_to_keep=5)
latest = checkpoint_manager.latest_checkpoint
if latest is not None:
logging.info('Restoring checkpoint from %s.', latest)
checkpoint.restore(latest)
logging.info('Successfully restored to step %s.', step_metric.result())
else:
logging.info('Did not find a pre-existing checkpoint. '
'Starting from scratch.')
return checkpoint_manager
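# Illustrative helper (editor's addition, not part of the original file).
def _example_manual_loop(agent, environment, steps_per_loop=2):
  """Shows how the helpers above compose outside of `train`, assuming `agent`
  and `environment` are already constructed elsewhere."""
  replay_buffer = get_replay_buffer(agent.policy.trajectory_spec,
                                    environment.batch_size, steps_per_loop)
  driver = dynamic_step_driver.DynamicStepDriver(
      env=environment,
      policy=agent.collect_policy,
      num_steps=steps_per_loop * environment.batch_size,
      observers=[replay_buffer.add_batch])
  training_loop = get_training_loop_fn(driver, replay_buffer, agent,
                                       steps_per_loop)
  return training_loop()  # one collect-and-train iteration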
def train(root_dir,
agent,
environment,
training_loops,
steps_per_loop,
additional_metrics=(),
training_data_spec_transformation_fn=None):
"""Perform `training_loops` iterations of training.
Checkpoint results.
If one or more baseline_reward_fns are provided, the regret is computed
against each one of them. Here is example baseline_reward_fn:
def baseline_reward_fn(observation, per_action_reward_fns):
rewards = ... # compute reward for each arm
optimal_action_reward = ... # take the maximum reward
return optimal_action_reward
Args:
root_dir: path to the directory where checkpoints and metrics will be
written.
agent: an instance of `TFAgent`.
environment: an instance of `TFEnvironment`.
training_loops: an integer indicating how many training loops should be run.
steps_per_loop: an integer indicating how many driver steps should be
executed and presented to the trainer during each training loop.
additional_metrics: Tuple of metric objects to log, in addition to default
metrics `NumberOfEpisodes`, `AverageReturnMetric`, and
`AverageEpisodeLengthMetric`.
training_data_spec_transformation_fn: Optional function that transforms the
data items before they get to the replay buffer.
"""
# TODO(b/127641485): create evaluation loop with configurable metrics.
if training_data_spec_transformation_fn is None:
data_spec = agent.policy.trajectory_spec
else:
data_spec = training_data_spec_transformation_fn(
agent.policy.trajectory_spec)
replay_buffer = get_replay_buffer(data_spec, environment.batch_size,
steps_per_loop)
# `step_metric` records the number of individual rounds of bandit interaction;
# that is, (number of trajectories) * batch_size.
step_metric = tf_metrics.EnvironmentSteps()
metrics = [
tf_metrics.NumberOfEpisodes(),
tf_metrics.AverageEpisodeLengthMetric(batch_size=environment.batch_size)
] + list(additional_metrics)
if isinstance(environment.reward_spec(), dict):
metrics += [tf_metrics.AverageReturnMultiMetric(
reward_spec=environment.reward_spec(),
batch_size=environment.batch_size)]
else:
metrics += [
tf_metrics.AverageReturnMetric(batch_size=environment.batch_size)]
if training_data_spec_transformation_fn is not None:
add_batch_fn = lambda data: replay_buffer.add_batch( # pylint: disable=g-long-lambda
training_data_spec_transformation_fn(data))
else:
add_batch_fn = replay_buffer.add_batch
observers = [add_batch_fn, step_metric] + metrics
driver = dynamic_step_driver.DynamicStepDriver(
env=environment,
policy=agent.collect_policy,
num_steps=steps_per_loop * environment.batch_size,
observers=observers)
training_loop = get_training_loop_fn(
driver, replay_buffer, agent, steps_per_loop)
checkpoint_manager = restore_and_get_checkpoint_manager(
root_dir, agent, metrics, step_metric)
train_step_counter = tf.compat.v1.train.get_or_create_global_step()
saver = policy_saver.PolicySaver(agent.policy, train_step=train_step_counter)
summary_writer = tf.summary.create_file_writer(root_dir)
summary_writer.set_as_default()
for i in range(training_loops):
training_loop()
metric_utils.log_metrics(metrics)
for metric in metrics:
metric.tf_summaries(train_step=step_metric.result())
checkpoint_manager.save()
if i % 100 == 0:
saver.save(os.path.join(root_dir, 'policy_%d' % step_metric.result()))
| 37.948454 | 89 | 0.740424 |
86614dcad65e20388a5967a40083bdb556db6db0 | 2,469 | py | Python | rally_openstack/cfg/manila.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/cfg/manila.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | null | null | null | rally_openstack/cfg/manila.py | RSE-Cambridge/rally-openstack | 32bbc091bbce1db625a2fc22da28b32718befa13 | [
"Apache-2.0"
] | 1 | 2018-12-10T12:31:27.000Z | 2018-12-10T12:31:27.000Z | # Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
OPTS = {"openstack": [
cfg.FloatOpt(
"manila_share_create_prepoll_delay",
default=2.0,
deprecated_group="benchmark",
help="Delay between creating Manila share and polling for its "
"status."),
cfg.FloatOpt(
"manila_share_create_timeout",
default=300.0,
deprecated_group="benchmark",
help="Timeout for Manila share creation."),
cfg.FloatOpt(
"manila_share_create_poll_interval",
default=3.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila share "
"creation."),
cfg.FloatOpt(
"manila_share_delete_timeout",
default=180.0,
deprecated_group="benchmark",
help="Timeout for Manila share deletion."),
cfg.FloatOpt(
"manila_share_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila share "
"deletion."),
cfg.FloatOpt(
"manila_access_create_timeout",
default=300.0,
deprecated_group="benchmark",
help="Timeout for Manila access creation."),
cfg.FloatOpt(
"manila_access_create_poll_interval",
default=3.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila access "
"creation."),
cfg.FloatOpt(
"manila_access_delete_timeout",
default=180.0,
deprecated_group="benchmark",
help="Timeout for Manila access deletion."),
cfg.FloatOpt(
"manila_access_delete_poll_interval",
default=2.0,
deprecated_group="benchmark",
help="Interval between checks when waiting for Manila access "
"deletion."),
]}
| 35.271429 | 78 | 0.651681 |
86624e00bb7b419aff83121a582546742f805433 | 571 | py | Python | app/backend/app/crud/crud_register_invoice.py | matayoos/invoice-scrapper | d36c944c10714e61d304693d0fce28769d2a746a | [
"MIT"
] | null | null | null | app/backend/app/crud/crud_register_invoice.py | matayoos/invoice-scrapper | d36c944c10714e61d304693d0fce28769d2a746a | [
"MIT"
] | null | null | null | app/backend/app/crud/crud_register_invoice.py | matayoos/invoice-scrapper | d36c944c10714e61d304693d0fce28769d2a746a | [
"MIT"
] | null | null | null | from sqlalchemy.orm.session import Session
from app import crud
from .utils import insert, get_content
| 27.190476 | 83 | 0.749562 |
8666c057450744d94668536ee8580d907346f31a | 28,602 | py | Python | tools/genapixml.py | garronej/linphone | f61a337f5363b991d6e866a6aa7d303658c04073 | [
"BSD-2-Clause"
] | null | null | null | tools/genapixml.py | garronej/linphone | f61a337f5363b991d6e866a6aa7d303658c04073 | [
"BSD-2-Clause"
] | null | null | null | tools/genapixml.py | garronej/linphone | f61a337f5363b991d6e866a6aa7d303658c04073 | [
"BSD-2-Clause"
] | 1 | 2021-03-17T10:04:06.000Z | 2021-03-17T10:04:06.000Z | #!/usr/bin/python
# Copyright (C) 2014 Belledonne Communications SARL
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import argparse
import os
import six
import string
import sys
import xml.etree.ElementTree as ET
import xml.dom.minidom as minidom
import metadoc
def main(argv = None):
if argv is None:
argv = sys.argv
argparser = argparse.ArgumentParser(description="Generate XML version of the Linphone API.")
argparser.add_argument('-o', '--outputfile', metavar='outputfile', type=argparse.FileType('w'), help="Output XML file describing the Linphone API.")
argparser.add_argument('--verbose', help="Increase output verbosity", action='store_true')
argparser.add_argument('--pretty', help="XML pretty print", action='store_true')
argparser.add_argument('xmldir', help="XML directory generated by doxygen.")
args = argparser.parse_args()
if args.outputfile == None:
args.outputfile = open('api.xml', 'w')
project = Project()
if args.verbose:
project.verbose = True
if args.pretty:
project.prettyPrint = True
project.initFromDir(args.xmldir)
project.check()
gen = Generator(args.outputfile)
gen.generate(project)
if __name__ == "__main__":
sys.exit(main())
| 35.977358 | 149 | 0.706559 |
866731500bf9de7d963d33a61b133cfd0fb18eda | 1,727 | py | Python | examples/src/python/join_streamlet_topology.py | aaronstjohn/incubator-heron | bdc35f8d23296472983956a477ea38da54d16b2b | [
"Apache-2.0"
] | 2 | 2016-07-04T07:10:31.000Z | 2018-03-28T16:59:02.000Z | examples/src/python/join_streamlet_topology.py | aaronstjohn/incubator-heron | bdc35f8d23296472983956a477ea38da54d16b2b | [
"Apache-2.0"
] | 1 | 2019-05-08T22:30:16.000Z | 2019-05-08T22:30:16.000Z | examples/src/python/join_streamlet_topology.py | aaronstjohn/incubator-heron | bdc35f8d23296472983956a477ea38da54d16b2b | [
"Apache-2.0"
] | 1 | 2017-06-05T17:55:45.000Z | 2017-06-05T17:55:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
'''join_streamlet_topology.py: module is an example of how to use the join operator'''
import sys
from heronpy.streamlet.builder import Builder
from heronpy.streamlet.runner import Runner
from heronpy.streamlet.config import Config
from heronpy.streamlet.windowconfig import WindowConfig
from heronpy.connectors.mock.arraylooper import ArrayLooper
# pylint: disable=superfluous-parens
if __name__ == '__main__':
if len(sys.argv) != 2:
print("Topology's name is not specified")
sys.exit(1)
builder = Builder()
source_1 = builder.new_source(ArrayLooper([["key1", "a"], ["key1", "b"]], sleep=1))
source_2 = builder.new_source(ArrayLooper([["key1", "c"], ["key1", "d"]], sleep=1))
source_1.join(source_2, WindowConfig.create_sliding_window(2, 1), lambda x, y: x + y).log()
runner = Runner()
config = Config()
runner.run(sys.argv[1], config, builder)
| 35.979167 | 93 | 0.735379 |
86686cf65534bfae5dd8d13670449f7c68cf0bb3 | 2,226 | py | Python | yolk/test/utils.py | yolkdata/yolk-python | 978d98cbe637c1309a1be766a40bb874e996c61d | [
"MIT",
"Unlicense"
] | null | null | null | yolk/test/utils.py | yolkdata/yolk-python | 978d98cbe637c1309a1be766a40bb874e996c61d | [
"MIT",
"Unlicense"
] | null | null | null | yolk/test/utils.py | yolkdata/yolk-python | 978d98cbe637c1309a1be766a40bb874e996c61d | [
"MIT",
"Unlicense"
] | null | null | null | from datetime import date, datetime, timedelta
from decimal import Decimal
import unittest
from dateutil.tz import tzutc
import six
from yolk import utils
| 28.177215 | 75 | 0.574573 |
866878f76f3d3d6bb3a8d89014200d8e8b85019b | 2,797 | py | Python | 09Scan/matrix.py | kw1122/MKS66 | 25986e79077692afbc085920af1fef276c22d967 | [
"MIT"
] | null | null | null | 09Scan/matrix.py | kw1122/MKS66 | 25986e79077692afbc085920af1fef276c22d967 | [
"MIT"
] | null | null | null | 09Scan/matrix.py | kw1122/MKS66 | 25986e79077692afbc085920af1fef276c22d967 | [
"MIT"
] | null | null | null | """
A matrix will be an N sized list of 4 element lists.
Each individual list will represent an [x, y, z, 1] point.
For multiplication purposes, consider the lists like so:
x0 x1 xn
y0 y1 yn
z0 z1 ... zn
1 1 1
"""
import math
#print the matrix such that it looks like
#the template in the top comment
#turn the parameter matrix into an identity matrix
#you may assume matrix is square
#multiply m1 by m2, modifying m2 to be the product
#m1 * m2 -> m2
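# --- Editor's sketch: minimal implementations matching the comments above.
# The names print_matrix / ident / matrix_mult follow the usual MKS66
# template, but they are assumptions; the original bodies were not preserved.

def print_matrix(matrix):
    # each inner list is a column ([x, y, z, 1] point), so print row by row
    for row in range(4):
        print(' '.join(str(point[row]) for point in matrix))

def ident(matrix):
    # overwrite the square matrix in place with the identity
    for i in range(len(matrix)):
        for j in range(len(matrix[i])):
            matrix[i][j] = 1 if i == j else 0

def matrix_mult(m1, m2):
    # m1 * m2 -> m2, treating each inner list of m2 as a column vector
    for c in range(len(m2)):
        col = m2[c][:]
        for r in range(len(col)):
            m2[c][r] = sum(m1[i][r] * col[i] for i in range(len(col)))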
| 22.739837 | 59 | 0.464784 |
86695a9d77f6427fc910ad0a37e4e6d95359ee20 | 1,918 | py | Python | tests/test.py | Nekmo/spice | 717a2cc24ad969e1caec2aabeffc30a796c6ec91 | [
"MIT"
] | null | null | null | tests/test.py | Nekmo/spice | 717a2cc24ad969e1caec2aabeffc30a796c6ec91 | [
"MIT"
] | null | null | null | tests/test.py | Nekmo/spice | 717a2cc24ad969e1caec2aabeffc30a796c6ec91 | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import sys, os
from time import sleep
sys.path.insert(0, '/home/may/Dropbox/Programming/spice/')
import spice_api as spice
if __name__ == '__main__':
main()
| 34.25 | 101 | 0.717414 |
866961c7ba40ad3796162e785a123e4decd9a074 | 6,648 | py | Python | backend/project/settings.py | prog-serhii/MyMoney_v2 | 8d2aa3ec0497c7afd1a25bb9266bfc405e9c9397 | [
"MIT"
] | 1 | 2020-11-09T10:32:05.000Z | 2020-11-09T10:32:05.000Z | backend/project/settings.py | prog-serhii/MyMoney_v2 | 8d2aa3ec0497c7afd1a25bb9266bfc405e9c9397 | [
"MIT"
] | null | null | null | backend/project/settings.py | prog-serhii/MyMoney_v2 | 8d2aa3ec0497c7afd1a25bb9266bfc405e9c9397 | [
"MIT"
] | null | null | null | import os
from pathlib import Path
from datetime import timedelta
from celery.schedules import crontab
from django.utils.translation import gettext_lazy as _
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = os.environ.get(
'SECRET_KEY',
default='m8/o)3$n^03w)mxgvnrxb46__@6qnte9l0dkb7$6%lpbcox+v!'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = int(os.environ.get('DEBUG', default=1))
# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", default='*').split(" ")
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'django_filters',
'rosetta',
'djoser',
'djmoney',
'djmoney.contrib.exchange',
'corsheaders',
'apps.account.apps.AccountConfig',
'apps.transaction.apps.TransactionConfig',
'apps.common.apps.CommonConfig',
'apps.authentication.apps.AuthenticationConfig'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware'
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get('SQL_ENGINE', 'django.db.backends.sqlite3'),
'NAME': os.environ.get('SQL_DATABASE', os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.environ.get('SQL_USER'),
'PASSWORD': os.environ.get('SQL_PASSWORD'),
'HOST': os.environ.get('SQL_HOST'),
'PORT': os.environ.get('SQL_PORT'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
AUTH_USER_MODEL = 'authentication.User'
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'uk'
LANGUAGES = [
('en', _('English')),
('uk', _('Ukrainian'))
]
LOCALE_PATHS = (
BASE_DIR / 'locale',
)
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = "/static/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# ------------------------------------------ #
# django-money #
# ------------------------------------------ #
DEFAULT_CURRENCY = 'EUR'
EXCHANGE_BACKEND = 'djmoney.contrib.exchange.backends.FixerBackend'
FIXER_ACCESS_KEY = 'f5a898dbf45d15d8aa6eca7af3f372e1'
# ------------------------------------------ #
# Celery #
# ------------------------------------------ #
CELERY_BROKER_URL = os.environ.get('REDIS_LOCATION', 'redis://127.0.0.1:6379')
CELERY_RESULT_BACKEND = os.environ.get('REDIS_LOCATION', 'redis://127.0.0.1:6379')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TASK_SERIALIZER = 'json'
CELERY_BEAT_SCHEDULE = {
'update_rates': {
'task': 'apps.user.tasks.update_rates',
'schedule': crontab(hour="*/1"),
}
}
# ------------------------------------------ #
# Django REST Framework #
# ------------------------------------------ #
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework_simplejwt.authentication.JWTAuthentication',
),
'EXCEPTION_HANDLER': 'apps.common.errors.custom_exception_handler',
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
# ------------------------------------------ #
# djoser #
# ------------------------------------------ #
DJOSER = {
# Name of a field in User model to be used as login field
'LOGIN_FIELD': 'email',
# If True user will be required to click activation
# link sent in email after:
# * creating an account
# * updating their email
'SEND_ACTIVATION_EMAIL': True,
'ACTIVATION_URL': '/activate/{uid}/{token}',
'PASSWORD_RESET_CONFIRM_URL': 'password/reset/confirm/{uid}/{token}',
'USERNAME_RESET_CONFIRM_URL': 'eamil/reset/confirm/{uid}/{token}',
# If True, you need to pass re_password to /users/
# endpoint, to validate password equality.
'USER_CREATE_PASSWORD_RETYPE': True,
'PASSWORD_RESET_CONFIRM_RETYPE': True,
# If True, register or activation endpoint
# will send confirmation email to user.
'SEND_CONFIRMATION_EMAIL': True,
'SERIALIZERS': {
'user_create': 'apps.user.serializers.UserCreateSerializer'
}
}
# ------------------------------------------ #
# Simple JWT #
# ------------------------------------------ #
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': timedelta(minutes=5),
'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
'ROTATE_REFRESH_TOKENS': True,
'AUTH_HEADER_TYPES': ('JWT',),
'USER_ID_FIELD': 'id',
'USER_ID_CLAIM': 'id',
}
CORS_ALLOW_ALL_ORIGINS = True
| 29.546667 | 91 | 0.626203 |
866977a21872a2e8438dbbd5f5b289547da5a50c | 641 | py | Python | app/flaskApp/config.py | jeanmarc2019/PTHacks2019-Planning | bc0c71588187fde8498494b3e74728c09de56f18 | [
"MIT"
] | null | null | null | app/flaskApp/config.py | jeanmarc2019/PTHacks2019-Planning | bc0c71588187fde8498494b3e74728c09de56f18 | [
"MIT"
] | null | null | null | app/flaskApp/config.py | jeanmarc2019/PTHacks2019-Planning | bc0c71588187fde8498494b3e74728c09de56f18 | [
"MIT"
] | null | null | null | import configparser
import os
dir_path = os.path.dirname(os.path.realpath(__file__))
dir_path += '/cfg.ini'
| 29.136364 | 73 | 0.669267 |
866b10a749a0729a15fdb56c60d4ea92a50f3344 | 3,270 | py | Python | neutron/db/models/l3ha.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | 4 | 2018-08-05T00:43:03.000Z | 2021-10-13T00:45:45.000Z | neutron/db/models/l3ha.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | 8 | 2018-06-14T14:50:16.000Z | 2018-11-13T16:30:42.000Z | neutron/db/models/l3ha.py | cleo4zheng/neutron | 6d65318308edfd984bdd0ff1ac7fef9486a040f7 | [
"Apache-2.0"
] | 7 | 2018-06-12T18:57:04.000Z | 2019-05-09T15:42:30.000Z | # Copyright (C) 2014 eNovance SAS <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.db import model_base
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.common import constants as n_const
from neutron.db.models import agent as agent_model
from neutron.db import models_v2
| 37.159091 | 76 | 0.644954 |
866b4e64606eb0b8b047d742a5c885f477addc0c | 1,551 | py | Python | authentication/migrate.py | anae09/electionWebService | 756968e5cd6db1422ae5fe8445a9e92a25953073 | [
"MIT"
] | null | null | null | authentication/migrate.py | anae09/electionWebService | 756968e5cd6db1422ae5fe8445a9e92a25953073 | [
"MIT"
] | null | null | null | authentication/migrate.py | anae09/electionWebService | 756968e5cd6db1422ae5fe8445a9e92a25953073 | [
"MIT"
] | null | null | null | from flask import Flask;
from configuration import Configuration;
from flask_migrate import Migrate, init, migrate, upgrade;
from models import database, Role, UserRole, User;
from sqlalchemy_utils import database_exists, create_database;
application = Flask(__name__);
application.config.from_object(Configuration);
migrateObject = Migrate(application, database);
done = False;
while not done:
try:
if not database_exists(application.config["SQLALCHEMY_DATABASE_URI"]):
create_database(application.config["SQLALCHEMY_DATABASE_URI"]);
database.init_app(application);
with application.app_context() as context:
init();
migrate(message="Production migration");
upgrade();
adminRole = Role(name="administrator");
userRole = Role(name="user");
database.session.add(adminRole);
database.session.add(userRole);
database.session.commit();
admin = User(
jmbg="0000000000000",
forename="admin",
surname="admin",
email="[email protected]",
password="1"
);
database.session.add(admin);
database.session.commit();
userRole = UserRole(
userId=admin.id,
roleId=adminRole.id
);
database.session.add(userRole);
database.session.commit();
done = True;
except Exception as err:
print(err);
| 28.2 | 78 | 0.595745 |
866b8b6db3282415cf332ea707795a1897c51203 | 4,066 | py | Python | output/ensemble_analysis.py | gitter-lab/pria-ams-enamine | b37bc7edf3c21af6653267ecd4bb9fd232eeb575 | [
"MIT"
] | 1 | 2021-09-28T23:10:05.000Z | 2021-09-28T23:10:05.000Z | output/ensemble_analysis.py | gitter-lab/pria-ams-enamine | b37bc7edf3c21af6653267ecd4bb9fd232eeb575 | [
"MIT"
] | null | null | null | output/ensemble_analysis.py | gitter-lab/pria-ams-enamine | b37bc7edf3c21af6653267ecd4bb9fd232eeb575 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
import json
import numpy as np
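# The definition of extract(file_path) was elided in this dump; a hedged stub
# with the assumed return shape (test_roc, test_precision, test_NEF), using -1
# as the "missing" sentinel the calling code below checks for:
def extract(file_path):
    if not os.path.exists(file_path):
        return -1, -1, -1
    return -1, -1, -1  # original parsing of the .out file was elided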
if __name__ == '__main__':
model_list = [
'random_forest_classification',
'xgboost_classification', 'xgboost_regression',
'single_deep_classification', 'single_deep_regression'
]
model_process_num_list = {
'random_forest_classification': [139, 69, 111, 212, 210, 148, 28, 61, 124, 130, 131, 141, 14, 38, 165, 65, 123, 94, 3, 88, 72],
'xgboost_classification': [140, 967, 960, 807, 263, 694, 440, 47, 116, 792, 663, 32, 564, 950, 735, 84, 364, 605, 431, 55, 388],
'xgboost_regression': [187, 6, 514, 507, 880, 440, 605, 718, 754, 409, 586, 214, 753, 65, 294, 911, 721, 81, 321, 545, 280],
'single_deep_classification': [356, 404, 215, 93, 254, 88, 423, 47, 363, 132, 5, 385, 370, 29, 415, 54, 124, 183, 180, 416],
'single_deep_regression': [199, 323, 114, 123, 47, 175, 17, 178, 106, 265, 67, 157, 369, 115, 191, 20, 27, 108, 270, 45],
'ensemble': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
}
for model in model_list:
print('Model: {}'.format(model))
number = len(model_process_num_list[model])
hyper_parameter_result_roc = []
hyper_parameter_result_precision = []
hyper_parameter_result_NEF = []
for running_process in model_process_num_list[model]:
test_roc_list, test_precision_list, test_NEF_list = [], [], []
for idx in range(4):
file_path = '{}/{}_{}_{}.out'.format(model, model, running_process, idx)
test_roc, test_precision, test_NEF = extract(file_path)
if test_roc == -1 and test_precision == -1:
print('missing index: {}'.format(running_process))
if test_roc != -1:
test_roc_list.append(test_roc)
if test_precision != -1:
test_precision_list.append(test_precision)
if test_NEF != -1:
test_NEF_list.append(test_NEF)
hyper_parameter_result_roc.append(np.mean(test_roc_list))
hyper_parameter_result_precision.append(np.mean(test_precision_list))
hyper_parameter_result_NEF.append(np.mean(test_NEF_list))
for running_process, roc, pr, NEF in zip(model_process_num_list[model], hyper_parameter_result_roc, hyper_parameter_result_precision, hyper_parameter_result_NEF):
print('{}\t{}\t{}\t{}'.format(running_process, roc, pr, NEF))
print()
print('On The Last Folder')
model_list = [
'random_forest_classification',
'xgboost_classification', 'xgboost_regression',
'single_deep_classification', 'single_deep_regression',
'ensemble'
]
for model in model_list:
print('Model: {}'.format(model))
number = len(model_process_num_list[model])
for running_process in model_process_num_list[model]:
if model == 'ensemble':
file_path = '{}/{}.out'.format(model, running_process)
else:
file_path = '{}/{}_{}_4.out'.format(model, model, running_process)
test_roc, test_precision, test_NEF = extract(file_path)
print('{}\t{}'.format(running_process, test_NEF))
print() | 42.354167 | 171 | 0.58485 |
866be250cb91ad06867da752bf60c3e580b71448 | 1,687 | py | Python | openstack_dashboard/test/integration_tests/regions/messages.py | ankur-gupta91/block_storage | 938548a3d4507dc56c1c26b442767eb41aa2e610 | [
"Apache-2.0"
] | 1 | 2021-01-02T03:34:19.000Z | 2021-01-02T03:34:19.000Z | openstack_dashboard/test/integration_tests/regions/messages.py | ankur-gupta91/block_storage | 938548a3d4507dc56c1c26b442767eb41aa2e610 | [
"Apache-2.0"
] | null | null | null | openstack_dashboard/test/integration_tests/regions/messages.py | ankur-gupta91/block_storage | 938548a3d4507dc56c1c26b442767eb41aa2e610 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common import by
from openstack_dashboard.test.integration_tests.regions import baseregion
ERROR = 'alert-danger'
INFO = 'alert-info'
SUCCESS = 'alert-success'
| 36.673913 | 79 | 0.711322 |
866d362b7d20329b9a8556ff353eba1624b11b05 | 8,175 | py | Python | model_input.py | bgarbin/GUIDE | 06bca4e696b97ca14c11d74844d3b3ab7287f8f1 | [
"MIT"
] | null | null | null | model_input.py | bgarbin/GUIDE | 06bca4e696b97ca14c11d74844d3b3ab7287f8f1 | [
"MIT"
] | null | null | null | model_input.py | bgarbin/GUIDE | 06bca4e696b97ca14c11d74844d3b3ab7287f8f1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
#import cmath as cm
# Main parameters for window
# 'nstep_record': number of time steps between two consecutive record events
window_params = {'kernel': 'RK4','nstep_update_plot': 100, 'step_size': 0.01, 'array_size': 10000, 'streaming': True, 'record_state':False, 'nstep_record':1, 'window_size':(1200,1000), 'invert_order_obs_var': True,'theme':'dark'}
# Definition of the plot configuration
def load_docks():
    ''' Returns a dict to be used for plot declarations. Here, we use pyqtgraph docks. Each plot has a dictionary as "value" with keys: "type" (accepted values: 'plot' and 'image'), "zoomOf" (key name of another dock), "position" (accepted values: 'bottom', 'top', 'left', 'right', 'above', or 'below'), "relativeTo" (optional, key name of another dock; position relative to another dock), "size" [(xlength,ylength); note that length arguments are only a suggestion; docks will still have to fill the entire dock area and obey the limits of their internal widgets], "labels" (dict of position:str), "title" (str). '''
docks = {
'plot1' : {'type': 'plot1D' , 'position': 'left' , 'size': (500,500), 'labels':{'bottom':'Time (arb. units)','left':'Intensity (arb. units)'}},
'phase_space' : {'type': 'plot2D', 'position': 'right', 'size': (300,300)},
'plot2' : {'type': 'plot1D' , 'zoomOf': 'plot1' , 'position': 'bottom', 'relativeTo': 'phase_space', 'size': (300,100)},
'plot3' : {'type': 'plot1D', 'position': 'top','relativeTo':'phase_space', 'size': (300,300)},
'custom_name' : {'type': 'image', 'position': 'above','relativeTo':'plot3', 'size': (300,300)},
}
return docks
def load_variables():
    ''' Returns a dict of the variables. Each variable is a dict with keys: "type" (e.g. np.float64, np.complex128), "init_cond" (type), "plot" (bool, optional, default is True), "dock" (list of key name(s) of docks [str] as defined in the load_docks function; optional; if not provided, will be plotted on every plot), "equation" (callable, optional, default is diff_eq_{variable_name}), "help" (str, to be displayed in help message). Additional keys are added internally: "value", "observable" (False), "lineedit", "checkbox". '''
variables = {
'A' : {'type': np.complex128, 'init_cond': 0., 'plot': False, 'dock':['plot1','plot2'], 'help':'field in the first cavity'},
'B' : {'type': np.complex128, 'init_cond': 0.001, 'plot': False, 'equation': diff_eq_B}
}
return variables
def load_observables():
    ''' Returns a dict of the observables. Similar to variables, observables are added internally to the dictionary of variables. Each observable is a dict with keys: "type" (e.g. np.float64, np.complex128), "init_cond" (type), "plot" (bool, optional, default is True), "dock" (list of key name(s) of docks [str] as defined in the load_docks function; optional; if not provided, will be plotted on every plot), "equation" (callable, optional, default is eq_{observable_name}), "calculation_size" (bool, whether you want the corresponding variable to be only the size of what the calculation returns; WARNING: those items won't be stored), "help" (str, to be displayed in help message). Additional keys are added internally: "value", "observable" (True), "lineedit", "checkbox". '''
observables = {
'mod_A' : {'type': np.float64, 'init_cond': 0., 'plot': True, 'dock':['plot1','plot2'], 'help':'modulus square of A'},
'mod_B' : {'type': np.float64, 'init_cond': 0., 'dock':['plot1','plot2','plot3']},
        'mod_A_2' : {'type': np.float64, 'init_cond': 0., 'plot': True, 'dock':[{'phase_space':['mod_A_2','mod_B_2']}],'calculation_size':True, 'help':'shortened abs(A)**2 array to be plotted in phase space'},
        'mod_B_2' : {'type': np.float64, 'init_cond': 0. ,'dock':[{'phase_space':['mod_B_2','mod_A_2']}],'calculation_size':True},
        'mod_A_2D' : {'type': np.float64, 'init_cond': 0. ,'dock':['custom_name'],'calculation_size':True,'help':'variable to be plotted in the image dock'},
#'ph_A' : {'type': np.float64, 'init_cond': 0., 'dock':['plot3']},
#'ph_B' : {'type': np.float64, 'init_cond': 0., 'dock':['plot3']}
}
return observables
def load_params():
    ''' Returns a dict of the parameters. Similarly to variables/observables, each parameter has a dictionary as "value" with keys: "init_cond" (float), "min" (float), "max" (float), "step" (float or int; WARNING: if int, this parameter will be an integer), "help" (str, to be displayed in help message). Additional keys are added internally: "value", "spinbox", "slider", "slider_conversion_factor". '''
params = {}
params['delta'] = {'init_cond': -8., 'min': -10., 'max': 10., 'step': 0.01, 'help':'detuning parameter'}
params['f'] = {'init_cond': 4.8, 'min': 0. , 'max': 20., 'step': 0.01}
params['kappa'] = {'init_cond': 2.8, 'min': 0. , 'max': 10., 'step': 0.01}
params['gamma'] = {'init_cond': 0. , 'min': -1. , 'max': 1., 'step': 0.01}
params['tau'] = {'init_cond': 1. , 'min': 0. , 'max': 10., 'step': 0.01}
params['npts_PS'] = {'init_cond': 1000 , 'min': 1 , 'max': 2000, 'step': 1}
params['folding'] = {'init_cond': 100 , 'min': 1 , 'max': 1000, 'step': 1}
    params['min_scan'] = {'init_cond': 0, 'min': 0., 'max': 500., 'step': 0.01, 'help':'lower bound of the scan range'}
    params['max_scan'] = {'init_cond': 10, 'min': 0., 'max': 500., 'step': 0.01, 'help':'upper bound of the scan range'}
    params['step_scan'] = {'init_cond': 0.05, 'min': 0.001, 'max': 10., 'step': 0.001, 'help':'scan step size'}
    params['nstep_scan'] = {'init_cond': 50, 'min': 0, 'max': 500, 'step': 1, 'help':'number of time steps per scan value'}
return params
# BEGIN Declaration of the equations. Automatically recognized patterns are "diff_eq_{variable}" (variables) and "eq_{observable}" (observables); the name after the pattern must match the variable/observable's name. Alternatively, you may use custom equation names; declare them in the variable/observable dictionary with the keyword "equation".
#def eq_ph_A(variables,params):
#return [cm.phase(temp) for temp in variables['A']] #np.array(np.arctan2(np.imag(variables['A']), np.real(variables['A'])))
#def eq_ph_B(variables,params):
#return [cm.phase(temp) for temp in variables['B']]
def keyboard_keys():
""" Returns a dictionnary of user defined keys of form key:callable. System reserved keys: [" ", "q", "h", "s", "r", "i", "c"]. This must return an empty dict if no extra keys. """
keys = {
't': ramp_f,
}
return keys
#return {}
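# 'ramp_f' above is user code whose body was elided in this dump; a minimal
# hedged sketch of such a callback (assumed signature, using the internal
# 'value'/'step'/'max' keys documented in load_params above):
#def ramp_f(variables, params):
#    ''' Increase the pump parameter f by one slider step on each key press. '''
#    params['f']['value'] = min(params['f']['value'] + params['f']['step'], params['f']['max'])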
def kernel_my_own(variables,params):
''' Takes as arguments dicts of variables and params as {'key':value}. Returns a dict of the results with the same form. For now the function name must start with "kernel_" '''
pass
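# A hedged sketch of a working custom kernel (an explicit Euler step), assuming
# the solver calls it once per time step with the current variables and params;
# diff_eq_B is declared above, diff_eq_A is assumed to follow the default
# naming convention described in the BEGIN comment:
#def kernel_euler(variables, params):
#    dt = window_params['step_size']
#    return {'A': variables['A'] + dt * diff_eq_A(variables, params),
#            'B': variables['B'] + dt * diff_eq_B(variables, params)}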
| 65.4 | 761 | 0.651865 |
866eb114075c78f8e3231df363ccab857402a80e | 1,464 | py | Python | input/EnvEq/pairwise/Tneg-Tpro/u_lim_o2Tpro-u_lim_o2Tneg/parallelizer.py | Harshavardhan-BV/Cancer-compe-strat | e4decacd5779e85a68c81d0ce3bedf42dea2964f | [
"MIT"
] | 1 | 2020-10-18T15:54:26.000Z | 2020-10-18T15:54:26.000Z | input/EnvEq/pairwise/Tneg-Tpro/u_lim_o2Tpro-u_lim_o2Tneg/parallelizer.py | Harshavardhan-BV/Cancer-compe-strat | e4decacd5779e85a68c81d0ce3bedf42dea2964f | [
"MIT"
] | null | null | null | input/EnvEq/pairwise/Tneg-Tpro/u_lim_o2Tpro-u_lim_o2Tneg/parallelizer.py | Harshavardhan-BV/Cancer-compe-strat | e4decacd5779e85a68c81d0ce3bedf42dea2964f | [
"MIT"
] | null | null | null | from multiprocessing import Pool
import EnvEq as ee
import numpy as np
import itertools as it
import os
#parsing input into numpy arrays
from input import *
y0=np.array([y0_Tpos,y0_Tpro,y0_Tneg,y0_o2,y0_test])
p=np.array([p_o2,p_test])
mu=np.array([[mu_o2Tpos,mu_o2Tpro,mu_o2Tneg],[mu_testTpos,mu_testTpro,0]])
lam=np.array([lam_o2,lam_test])
t_D=np.array([t_DTpos,t_DTpro,t_DTneg])
r=np.array([r_Tpos,r_Tpro,r_Tneg])
delta=np.array([delta_Tpos,delta_Tpro,delta_Tneg])
rho=np.array([rho_Tpos,rho_Tpro,rho_Tneg])
lim=np.array([[[l_lim_o2Tpos,u_lim_o2Tpos],[l_lim_o2Tpro,u_lim_o2Tpro],[l_lim_o2Tneg,u_lim_o2Tneg]],[[l_lim_testTpos,u_lim_testTpos],[l_lim_testTpro,u_lim_testTpro],[0,0]]],dtype=np.float64)
#make directories for saving raw_outputs
try:
os.makedirs("../../raw_output/EnvEq/"+f_name)
except:
pass
#iterator over these
o2_lim_arr=np.empty([0,2])
for ulim_Tpro in np.arange(0.1,1,0.2):
for ulim_Tneg in np.arange(0.1,1,0.2):
o2_lim_arr=np.append(o2_lim_arr,[[ulim_Tpro,ulim_Tneg]],axis=0)
if __name__ == '__main__':
pool = Pool(4)
pool.map(solve_parm,o2_lim_arr) #iterate over the o2_lims
pool.close()
pool.join()
| 34.046512 | 190 | 0.733607 |
866f56e685c4eea3f8e5c6a81ebbf185f955f32d | 4,495 | py | Python | task1_makeTrainingDataset.py | 1985312383/contest | c4734647ad436cf5884075f906a3e9f10fc4dcfa | [
"Apache-2.0"
] | 2 | 2021-12-10T08:38:47.000Z | 2021-12-31T08:44:18.000Z | task1_makeTrainingDataset.py | huxiaoyi0625/Mathematical_Modeling_Contest_E_2021 | 40293aa2375daa46d2351870c72394d4e1114081 | [
"Apache-2.0"
] | null | null | null | task1_makeTrainingDataset.py | huxiaoyi0625/Mathematical_Modeling_Contest_E_2021 | 40293aa2375daa46d2351870c72394d4e1114081 | [
"Apache-2.0"
] | null | null | null | import csv
import re
import numpy as np
thre = 1.5  # threshold
iteration_num = 2  # number of iterations
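# The definition of collect_dataset (and the dataset-name strings passed below)
# was elided in this dump; a hedged stub so the __main__ block stays callable:
def collect_dataset(name):
    """Build a training dataset for the given name (original body elided)."""
    pass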
if __name__ == '__main__':
collect_dataset("")
collect_dataset("")
| 36.544715 | 117 | 0.602447 |
866fc71e2531405289f44f8b89f61a0e57369c55 | 321 | py | Python | checkmate/contrib/plugins/all/progpilot/setup.py | marcinguy/checkmate-ce | fc33c7c27bc640ab4db5dbda274a0edd3b3db218 | [
"MIT"
] | null | null | null | checkmate/contrib/plugins/all/progpilot/setup.py | marcinguy/checkmate-ce | fc33c7c27bc640ab4db5dbda274a0edd3b3db218 | [
"MIT"
] | null | null | null | checkmate/contrib/plugins/all/progpilot/setup.py | marcinguy/checkmate-ce | fc33c7c27bc640ab4db5dbda274a0edd3b3db218 | [
"MIT"
] | null | null | null | from .analyzer import ProgpilotAnalyzer
from .issues_data import issues_data
analyzers = {
    'phpanalyzer' :
{
'name' : 'phpanalyzer',
'title' : 'phpanalyzer',
'class' : ProgpilotAnalyzer,
'language' : 'all',
'issues_data' : issues_data,
},
}
| 22.928571 | 40 | 0.535826 |
8671b6a372caa3589eb77dcc566a9b3713aa80a9 | 6,499 | py | Python | genlist.py | truckli/technotes | 11d3cc0a1bd33141a22eaa2247cac1be1d74718a | [
"Apache-2.0"
] | null | null | null | genlist.py | truckli/technotes | 11d3cc0a1bd33141a22eaa2247cac1be1d74718a | [
"Apache-2.0"
] | null | null | null | genlist.py | truckli/technotes | 11d3cc0a1bd33141a22eaa2247cac1be1d74718a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import shutil, re, os, sys
file_model = "Model.template"
bookname = "TechNotes"
file_bibtex = "thebib.bib"
folder_target = "../pdf/"
#if name is a chapter, return its sections
#if depend_files contains citation
#trim trailing slash
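# The helper definitions that followed the three comments above were elided in
# this dump; hedged minimal stubs with the assumed behavior, so the driver code
# below still has something to call:
def get_sections(name):
    # if name is a chapter directory, return its .tex section names, else []
    if not os.path.isdir(name):
        return []
    return [f[:-4] for f in os.listdir(name) if f.endswith('.tex')]
def depends_contain_citation(depend_files):
    # True if any dependent file contains a \cite command (then thebib.bib is needed)
    return any(re.search(r'\\cite', open(f).read()) for f in depend_files)
def trim_chap_name(name):
    # trim trailing slash
    return name.rstrip('/')
def gen_pdf(chap):
    # compile `chap` to PDF from the Model.template LaTeX model; return True if
    # the PDF was rebuilt (assumption: the original implementation was elided)
    return False
def merge_chapter_pdfs():
    pass  # merge chapter PDFs into the book PDF under folder_target (elided)
def remove_latex_tmps(name):
    pass  # remove LaTeX temporary files for `name` (elided)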
##################################################
#now work starts
files = os.listdir('.')
chap_sections = {}
book_sections = []
book_chapters = []
for chap in files:
sections = get_sections(chap)
if len(sections):
chap_sections[chap] = sections
book_sections.extend(sections)
book_chapters.append(chap)
cmd = "one"
if cmd == "one":
gen_pdf(bookname)
elif cmd == "all":
modified = False
for chap in chap_sections:
modified = gen_pdf(chap) or modified
if modified:
merge_chapter_pdfs()
elif cmd == "clean":
for chap in chap_sections:
remove_latex_tmps(chap)
remove_latex_tmps(bookname)
else:
chap = trim_chap_name(cmd)
if chap in book_sections:
#chap is actually a section
section = chap
chap = 'Report'
chap_sections[chap] = [section]
book_chapters.append(chap)
if not chap_sections.has_key(chap):
print(chap + " is not a valid chapter name")
sys.exit(1)
modified = gen_pdf(chap)
if modified and chap != 'Report':
merge_chapter_pdfs()
| 27.892704 | 82 | 0.610248 |
86720b27c369fdf8140425890d2127c46b5bc111 | 24,252 | py | Python | editing files/Portable Python 3.2.5.1/App/Lib/site-packages/serial/serialposix.py | mattl1598/testing | cd8124773b83a07301c507ffbb9ccaafbfe7a274 | [
"Unlicense"
] | null | null | null | editing files/Portable Python 3.2.5.1/App/Lib/site-packages/serial/serialposix.py | mattl1598/testing | cd8124773b83a07301c507ffbb9ccaafbfe7a274 | [
"Unlicense"
] | 1 | 2018-04-15T22:59:15.000Z | 2018-04-15T22:59:15.000Z | editing files/Portable Python 3.2.5.1/App/Lib/site-packages/serial/serialposix.py | mattl1598/Project-Mochachino | cd8124773b83a07301c507ffbb9ccaafbfe7a274 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# module for serial IO for POSIX compatible systems, like Linux
# see __init__.py
#
# (C) 2001-2010 Chris Liechti <[email protected]>
# this is distributed under a free software license, see license.txt
#
# parts based on code from Grant B. Edwards <[email protected]>:
# ftp://ftp.visi.com/users/grante/python/PosixSerial.py
#
# references: http://www.easysw.com/~mike/serial/serial.html
import sys, os, fcntl, termios, struct, select, errno, time
from .serialutil import *
# Do check the Python version as some constants have moved.
if (sys.hexversion < 0x020100f0):
import TERMIOS
else:
TERMIOS = termios
if (sys.hexversion < 0x020200f0):
import FCNTL
else:
FCNTL = fcntl
# try to detect the OS so that a device can be selected...
# this code block should supply a device() and set_special_baudrate() function
# for the platform
plat = sys.platform.lower()
if plat[:5] == 'linux': # Linux (confirmed)
ASYNC_SPD_MASK = 0x1030
ASYNC_SPD_CUST = 0x0030
baudrate_constants = {
0: 0000000, # hang up
50: 0o000001,
75: 0o000002,
110: 0o000003,
134: 0o000004,
150: 0o000005,
200: 0o000006,
300: 0o000007,
600: 0o000010,
1200: 0o000011,
1800: 0o000012,
2400: 0o000013,
4800: 0o000014,
9600: 0o000015,
19200: 0o000016,
38400: 0o000017,
57600: 0o010001,
115200: 0o010002,
230400: 0o010003,
460800: 0o010004,
500000: 0o010005,
576000: 0o010006,
921600: 0o010007,
1000000: 0o010010,
1152000: 0o010011,
1500000: 0o010012,
2000000: 0o010013,
2500000: 0o010014,
3000000: 0o010015,
3500000: 0o010016,
4000000: 0o010017
}
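    # ASYNC_SPD_MASK/ASYNC_SPD_CUST belong to the set_special_baudrate() helper
    # this block is documented to supply (its body was elided here). The classic
    # Linux recipe, hedged and with struct offsets deliberately omitted: read
    # struct serial_struct via TIOCGSERIAL, set ASYNC_SPD_CUST in .flags, set
    # .custom_divisor = .baud_base // baudrate, then write back via TIOCSSERIAL.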
elif plat == 'cygwin': # cygwin/win32 (confirmed)
baudrate_constants = {}
elif plat == 'openbsd3': # BSD (confirmed)
baudrate_constants = {}
elif plat[:3] == 'bsd' or \
plat[:7] == 'freebsd' or \
plat[:7] == 'openbsd': # BSD (confirmed for freebsd4: cuaa%d)
baudrate_constants = {}
elif plat[:6] == 'darwin': # OS X
version = os.uname()[2].split('.')
# Tiger or above can support arbitrary serial speeds
    if int(version[0]) >= 8:
        baudrate_constants = {}  # arbitrary speeds supported (the set_special_baudrate helper was elided here)
    else: # version < 8
        baudrate_constants = {}
elif plat[:6] == 'netbsd': # NetBSD 1.6 testing by Erk
baudrate_constants = {}
elif plat[:4] == 'irix': # IRIX (partially tested)
baudrate_constants = {}
elif plat[:2] == 'hp': # HP-UX (not tested)
baudrate_constants = {}
elif plat[:5] == 'sunos': # Solaris/SunOS (confirmed)
baudrate_constants = {}
elif plat[:3] == 'aix': # AIX
baudrate_constants = {}
else:
# platform detection has failed...
sys.stderr.write("""\
don't know how to number ttys on this system.
! Use an explicit path (eg /dev/ttyS1) or send this information to
! the author of this module:
sys.platform = %r
os.name = %r
serialposix.py version = %s
also add the device name of the serial port and where the
counting starts for the first serial port.
e.g. 'first serial port: /dev/ttyS0'
and with a bit luck you can get this module running...
""" % (sys.platform, os.name, VERSION))
# no exception, just continue with a brave attempt to build a device name
# even if the device name is not correct for the platform it has chances
# to work using a string with the real device name as port parameter.
baudrate_constants = {}
#~ raise Exception, "this module does not run on this platform, sorry."
# what's up with "aix", "beos", ....
# they should work, just need to know the device names.
# load some constants for later use.
# try to use values from TERMIOS, use defaults from linux otherwise
TIOCMGET = hasattr(TERMIOS, 'TIOCMGET') and TERMIOS.TIOCMGET or 0x5415
TIOCMBIS = hasattr(TERMIOS, 'TIOCMBIS') and TERMIOS.TIOCMBIS or 0x5416
TIOCMBIC = hasattr(TERMIOS, 'TIOCMBIC') and TERMIOS.TIOCMBIC or 0x5417
TIOCMSET = hasattr(TERMIOS, 'TIOCMSET') and TERMIOS.TIOCMSET or 0x5418
#TIOCM_LE = hasattr(TERMIOS, 'TIOCM_LE') and TERMIOS.TIOCM_LE or 0x001
TIOCM_DTR = hasattr(TERMIOS, 'TIOCM_DTR') and TERMIOS.TIOCM_DTR or 0x002
TIOCM_RTS = hasattr(TERMIOS, 'TIOCM_RTS') and TERMIOS.TIOCM_RTS or 0x004
#TIOCM_ST = hasattr(TERMIOS, 'TIOCM_ST') and TERMIOS.TIOCM_ST or 0x008
#TIOCM_SR = hasattr(TERMIOS, 'TIOCM_SR') and TERMIOS.TIOCM_SR or 0x010
TIOCM_CTS = hasattr(TERMIOS, 'TIOCM_CTS') and TERMIOS.TIOCM_CTS or 0x020
TIOCM_CAR = hasattr(TERMIOS, 'TIOCM_CAR') and TERMIOS.TIOCM_CAR or 0x040
TIOCM_RNG = hasattr(TERMIOS, 'TIOCM_RNG') and TERMIOS.TIOCM_RNG or 0x080
TIOCM_DSR = hasattr(TERMIOS, 'TIOCM_DSR') and TERMIOS.TIOCM_DSR or 0x100
TIOCM_CD = hasattr(TERMIOS, 'TIOCM_CD') and TERMIOS.TIOCM_CD or TIOCM_CAR
TIOCM_RI = hasattr(TERMIOS, 'TIOCM_RI') and TERMIOS.TIOCM_RI or TIOCM_RNG
#TIOCM_OUT1 = hasattr(TERMIOS, 'TIOCM_OUT1') and TERMIOS.TIOCM_OUT1 or 0x2000
#TIOCM_OUT2 = hasattr(TERMIOS, 'TIOCM_OUT2') and TERMIOS.TIOCM_OUT2 or 0x4000
TIOCINQ = hasattr(TERMIOS, 'FIONREAD') and TERMIOS.FIONREAD or 0x541B
TIOCM_zero_str = struct.pack('I', 0)
TIOCM_RTS_str = struct.pack('I', TIOCM_RTS)
TIOCM_DTR_str = struct.pack('I', TIOCM_DTR)
TIOCSBRK = hasattr(TERMIOS, 'TIOCSBRK') and TERMIOS.TIOCSBRK or 0x5427
TIOCCBRK = hasattr(TERMIOS, 'TIOCCBRK') and TERMIOS.TIOCCBRK or 0x5428
# assemble Serial class with the platform specific implementation and the base
# for file-like behavior. for Python 2.6 and newer, which provide the new I/O
# library, derive from io.RawIOBase
try:
import io
except ImportError:
    # classic version with our own file-like emulation
    pass  # (the FileLike-based Serial class definition was elided here)
else:
    # io library present
    pass  # (the io.RawIOBase-based Serial class definition was elided here)
if __name__ == '__main__':
s = Serial(0,
baudrate=19200, # baud rate
bytesize=EIGHTBITS, # number of data bits
parity=PARITY_EVEN, # enable parity checking
stopbits=STOPBITS_ONE, # number of stop bits
timeout=3, # set a timeout value, None for waiting forever
xonxoff=0, # enable software flow control
rtscts=0, # enable RTS/CTS flow control
)
s.setRTS(1)
s.setDTR(1)
s.flushInput()
s.flushOutput()
s.write('hello')
sys.stdout.write('%r\n' % s.read(5))
sys.stdout.write('%s\n' % s.inWaiting())
del s
| 37.253456 | 117 | 0.601023 |
86735b1546f055d4c408acb184f9fb83f843ede1 | 2,272 | py | Python | Older Examples - enter at your own risk/lavender_pos/app/models.py | electricimp/examples | ebdd01baf64f3aa67f027194457432c7d7501d37 | [
"MIT"
] | 26 | 2015-01-17T23:43:06.000Z | 2021-09-28T18:24:28.000Z | Older Examples - enter at your own risk/lavender_pos/app/models.py | silver2row/examples | 0ef3bd4b4a875364db883c7c387f1f175a3ce61e | [
"MIT"
] | 5 | 2015-02-27T22:23:39.000Z | 2020-10-19T23:58:55.000Z | Older Examples - enter at your own risk/lavender_pos/app/models.py | silver2row/examples | 0ef3bd4b4a875364db883c7c387f1f175a3ce61e | [
"MIT"
] | 42 | 2015-01-22T16:33:12.000Z | 2021-01-14T02:33:15.000Z | import datetime
from database import Base
from sqlalchemy import Column, String, Integer, ForeignKey, DateTime, Float
| 28.049383 | 81 | 0.652729 |
8674487bc14ab6d974246602ccaa1b9927159028 | 4,724 | py | Python | rr_ml/nodes/end_to_end/train.py | ebretl/roboracing-software | 8803c97a885500069d04e70894b19f807ae5baf9 | [
"MIT"
] | null | null | null | rr_ml/nodes/end_to_end/train.py | ebretl/roboracing-software | 8803c97a885500069d04e70894b19f807ae5baf9 | [
"MIT"
] | null | null | null | rr_ml/nodes/end_to_end/train.py | ebretl/roboracing-software | 8803c97a885500069d04e70894b19f807ae5baf9 | [
"MIT"
] | null | null | null | import os
import math
import string
import numpy as np
import rospy
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, \
GaussianNoise, BatchNormalization
import cv2
import collections
import random
import time
from example_set import ExampleSet
from params import input_shape, expand_categories
n_examples_to_load = 8000 # if the number of training examples is below this, load more data
batch_size = 16
categories = [None]
| 33.503546 | 94 | 0.622777 |
86748acfd2e2d6f503ac8703ff17fb7002cb2fc1 | 252 | py | Python | build/lib/Kronos_heureka_code/Zeit/__init__.py | heureka-code/Kronos-heureka-code | 0ddbc93ec69f0bc50075071e6a3e406c9cc97737 | [
"MIT"
] | null | null | null | build/lib/Kronos_heureka_code/Zeit/__init__.py | heureka-code/Kronos-heureka-code | 0ddbc93ec69f0bc50075071e6a3e406c9cc97737 | [
"MIT"
] | null | null | null | build/lib/Kronos_heureka_code/Zeit/__init__.py | heureka-code/Kronos-heureka-code | 0ddbc93ec69f0bc50075071e6a3e406c9cc97737 | [
"MIT"
] | null | null | null | from Kronos_heureka_code.Zeit.Uhrzeit import Uhrzeit, Stunde, Minute, Sekunde
from Kronos_heureka_code.Zeit.Datum.Monat import Monate
from Kronos_heureka_code.Zeit.Datum.Jahr import Jahr, Zeitrechnung
from Kronos_heureka_code.Zeit.Datum.Tag import Tag
| 50.4 | 77 | 0.861111 |
867600e9a45eebd6cdfd857a6ba6c53cc063cb70 | 845 | py | Python | retro_star/utils/logger.py | cthoyt/retro_star | 280231eb2f5dffc0e14bed300d770977b323205a | [
"MIT"
] | 65 | 2020-06-27T04:28:21.000Z | 2022-03-30T11:18:22.000Z | retro_star/utils/logger.py | cthoyt/retro_star | 280231eb2f5dffc0e14bed300d770977b323205a | [
"MIT"
] | 15 | 2020-07-07T13:17:05.000Z | 2022-03-22T12:52:29.000Z | retro_star/utils/logger.py | cthoyt/retro_star | 280231eb2f5dffc0e14bed300d770977b323205a | [
"MIT"
] | 14 | 2020-06-30T09:22:13.000Z | 2022-03-30T11:18:28.000Z | import logging
| 33.8 | 81 | 0.571598 |
86760e9869adb9d8e359c444118b7fb153ad2c74 | 63,014 | py | Python | Packs/MISP/Integrations/MISPV3/MISPV3.py | hiep4hiep/content | f609c4c9548fe2188e8e2e00b2c9e80a74e24427 | [
"MIT"
] | null | null | null | Packs/MISP/Integrations/MISPV3/MISPV3.py | hiep4hiep/content | f609c4c9548fe2188e8e2e00b2c9e80a74e24427 | [
"MIT"
] | 42 | 2022-03-11T10:52:26.000Z | 2022-03-31T01:50:42.000Z | Packs/MISP/Integrations/MISPV3/MISPV3.py | hiep4hiep/content | f609c4c9548fe2188e8e2e00b2c9e80a74e24427 | [
"MIT"
] | null | null | null | # type: ignore
from typing import Union, List, Dict
from urllib.parse import urlparse
import urllib3
from pymisp import ExpandedPyMISP, PyMISPError, MISPObject, MISPSighting, MISPEvent, MISPAttribute
from pymisp.tools import GenericObjectGenerator
import copy
from pymisp.tools import FileObject
from CommonServerPython import *
logging.getLogger("pymisp").setLevel(logging.CRITICAL)
def warn(*args):
"""
Do nothing with warnings
"""
pass
# Disable requests warnings
urllib3.disable_warnings()
# Disable python warnings
warnings.warn = warn
''' GLOBALS/PARAMS '''
params = demisto.params()
if not params.get('credentials') or not (MISP_API_KEY := params.get('credentials', {}).get('password')):
raise DemistoException('Missing API Key. Fill in a valid key in the integration configuration.')
MISP_URL = params.get('url')
VERIFY = not params.get('insecure')
PROXIES = handle_proxy() # type: ignore
try:
PYMISP = ExpandedPyMISP(url=MISP_URL, key=MISP_API_KEY, ssl=VERIFY, proxies=PROXIES)
except PyMISPError as e:
handle_connection_errors(e.message)
PREDEFINED_FEEDS = {
'CIRCL': {'name': 'CIRCL OSINT Feed',
'url': 'https://www.circl.lu/doc/misp/feed-osint',
'format': 'misp',
'input': 'network'},
'Botvrij.eu': {'name': 'The Botvrij.eu Data',
'url': 'http://www.botvrij.eu/data/feed-osint',
'format': 'misp',
'input': 'network'}
}
THREAT_LEVELS_TO_ID = {
'High': 1,
'Medium': 2,
'Low': 3,
'Unknown': 4
}
MISP_ENTITIES_TO_CONTEXT_DATA = {
'deleted': 'Deleted',
'category': 'Category',
'comment': 'Comment',
'uuid': 'UUID',
'sharing_group_id': 'SharingGroupID',
'timestamp': 'LastChanged',
'to_ids': 'ToIDs',
'value': 'Value',
'event_id': 'EventID',
'ShadowAttribute': 'ShadowAttribute',
'disable_correlation': 'DisableCorrelation',
'distribution': 'Distribution',
'type': 'Type',
'id': 'ID',
'date': 'CreationDate',
'info': 'Info',
'published': 'Published',
'attribute_count': 'AttributeCount',
'proposal_email_lock': 'ProposalEmailLock',
'locked': 'Locked',
'publish_timestamp': 'PublishTimestamp',
'event_creator_email': 'EventCreatorEmail',
'name': 'Name',
'analysis': 'Analysis',
'threat_level_id': 'ThreatLevelID',
'old_id': 'OldID',
'org_id': 'OrganizationID',
'Org': 'Organization',
'Orgc': 'OwnerOrganization',
'orgc_uuid': 'OwnerOrganization.UUID',
'orgc_id': 'OwnerOrganization.ID',
'orgc_name': 'OwnerOrganization.Name',
'event_uuid': 'EventUUID',
'proposal_to_delete': 'ProposalToDelete',
'description': 'Description',
'version': 'Version',
'Object': 'Object',
'object_id': 'ObjectID',
'object_relation': 'ObjectRelation',
'template_version': 'TemplateVersion',
'template_uuid': 'TemplateUUID',
'meta-category': 'MetaCategory',
'decay_score': 'DecayScore',
'first_seen': 'first_seen',
'last_seen': 'last_seen',
'provider': 'Provider',
'source_format': 'SourceFormat',
'url': 'URL',
'event_uuids': 'EventUUIDS',
}
MISP_ANALYSIS_TO_IDS = {
'initial': 0,
'ongoing': 1,
'completed': 2
}
MISP_DISTRIBUTION_TO_IDS = {
'Your_organization_only': 0,
'This_community_only': 1,
'Connected_communities': 2,
'All_communities': 3,
'Inherit_event': 5
}
SIGHTING_TYPE_NAME_TO_ID = {
'sighting': 0,
'false_positive': 1,
'expiration': 2
}
SIGHTING_TYPE_ID_TO_NAME = {
'0': 'sighting',
'1': 'false_positive',
'2': 'expiration'
}
INDICATOR_TYPE_TO_DBOT_SCORE = {
'FILE': DBotScoreType.FILE,
'URL': DBotScoreType.URL,
'DOMAIN': DBotScoreType.DOMAIN,
'IP': DBotScoreType.IP,
'EMAIL': DBotScoreType.EMAIL,
}
DOMAIN_REGEX = (
r"([a-z-\uffff0-9](?:[a-z-\uffff0-9-]{0,61}"
"[a-z-\uffff0-9])?(?:\\.(?!-)[a-z-\uffff0-9-]{1,63}(?<!-))*"
"\\.(?!-)(?!(jpg|jpeg|exif|tiff|tif|png|gif|otf|ttf|fnt|dtd|xhtml|css"
"|html)$)(?:[a-z-\uffff-]{2,63}|xn--[a-z0-9]{1,59})(?<!-)\\.?$"
"|localhost)"
)
MISP_SEARCH_ARGUMENTS = [
'value',
'type',
'category',
'org',
'tags',
'from',
'to',
'event_id',
'uuid',
'to_ids',
'last',
'include_decay_score',
'include_sightings',
'include_correlations',
'limit',
'page',
'enforceWarninglist',
'include_feed_correlations',
]
EVENT_FIELDS = [
'id',
'orgc_id',
'org_id',
'date',
'threat_level_id',
'info',
'published',
'uuid',
'analysis',
'attribute_count',
'timestamp',
'distribution',
'proposal_email_lock',
'locked',
'publish_timestamp',
'sharing_group_id',
'disable_correlation',
'event_creator_email',
'Org',
'Orgc',
'RelatedEvent',
'Galaxy',
'Tag',
'decay_score',
'Object',
'Feed',
]
ATTRIBUTE_FIELDS = [
'id',
'event_id',
'object_id',
'object_relation',
'category',
'type',
'to_ids',
'uuid',
'timestamp',
'distribution',
'sharing_group_id',
'comment',
'deleted',
'disable_correlation',
'first_seen',
'last_seen',
'value',
'Event',
'Object',
'Galaxy',
'Tag',
'decay_score',
'Sighting',
]
def extract_error(error: list) -> List[dict]:
"""
Extracting errors raised by PYMISP into readable response, for more information and examples
please see UT: test_extract_error.
Args:
error: list of responses from error section
Returns:
List[Dict[str, any]]: filtered response
"""
return [{
'code': err[0],
'message': err[1].get('message'),
'errors': err[1].get('errors')
} for err in error]
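# For example (hypothetical PyMISP error payload):
#   extract_error([(403, {'message': 'Could not add object', 'errors': 'Invalid template'})])
#   -> [{'code': 403, 'message': 'Could not add object', 'errors': 'Invalid template'}]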
def dict_to_generic_object_format(args: dict) -> List[dict]:
"""
Converts args dict into a list, please see GenericObjectGenerator Class in Pymisp.
Args:
args: dictionary describes MISP object
Returns:
list: list containing dicts that GenericObjectGenerator can take.
Examples:
>>> {'ip': '8.8.8.8', 'domain': 'google.com'}
[{'ip': '8.8.8.8'}, {'domain': 'google.com'}]
"""
return [{k: v} for k, v in args.items()]
def build_generic_object(template_name: str, args: List[dict]) -> GenericObjectGenerator:
"""
Args:
template_name: template name as described in https://github.com/MISP/misp-objects
args: arguments to create the generic object
Returns:
GenericObjectGenerator: object created in MISP
Example:
args should look like:
[{'analysis_submitted_at': '2018-06-15T06:40:27'},
{'threat_score': {value=95, to_ids=False}},
{'permalink': 'https://panacea.threatgrid.com/mask/samples/2e445ef5389d8b'},
{'heuristic_raw_score': 7.8385159793597}, {'heuristic_score': 96},
{'original_filename': 'juice.exe'}, {'id': '2e445ef5389d8b'}] # guardrails-disable-line
"""
misp_object = GenericObjectGenerator(template_name)
misp_object.generate_attributes(args)
return misp_object
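# A hedged usage sketch (the 'domain-ip' template name and the event id are
# illustrative): build a generic object and attach it to an existing event.
#   obj = build_generic_object('domain-ip', [{'domain': 'example.com'}, {'ip': '8.8.8.8'}])
#   PYMISP.add_object(event_id, misp_object=obj)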
def misp_convert_timestamp_to_date_string(timestamp: Union[str, int]) -> str:
"""
Gets a timestamp from MISP response (1546713469) and converts it to human readable format
"""
return datetime.utcfromtimestamp(int(timestamp)).strftime('%Y-%m-%dT%H:%M:%SZ') if timestamp else ""
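# e.g. misp_convert_timestamp_to_date_string(1546713469) -> '2019-01-05T18:37:49Z'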
def replace_keys_from_misp_to_context_data(obj_to_build: Union[dict, list, str]) -> Union[dict, list, str]:
"""
Replacing keys from MISP's format to Demisto's (as appear in ENTITIESDICT)
Args:
obj_to_build (Union[dict, list, str]): object to replace keys in
Returns:
Union[dict, list, str]: same object type that got in
"""
if isinstance(obj_to_build, list):
return [replace_keys_from_misp_to_context_data(item) for item in obj_to_build]
if isinstance(obj_to_build, dict):
return {
(MISP_ENTITIES_TO_CONTEXT_DATA[key] if key in MISP_ENTITIES_TO_CONTEXT_DATA else key):
replace_keys_from_misp_to_context_data(value) for key, value in obj_to_build.items()
}
return obj_to_build
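# e.g. {'uuid': 'a1', 'Tag': [{'event_id': '5'}]} becomes
#      {'UUID': 'a1', 'Tag': [{'EventID': '5'}]} per MISP_ENTITIES_TO_CONTEXT_DATA;
# keys with no mapping (like 'Tag' itself) pass through unchanged.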
def limit_tag_output_to_id_and_name(attribute_dict, is_event_level):
"""
    As the tag list can be full of unnecessary data, we want to limit this list to include only the ID and Name fields.
In addition, returns set of the found tag ids.
Some tags have a field called inherited. When it is set to 1 it says that it is an event's tag.
Otherwise (if it is set to 0 or not exists) it says that it is an attribute's tag.
If the data is event's (is_event_level = true) we would like to add to tag_set_ids all the tags
(event ones and the event's attribute tags ones as it is part of the event scope).
If the data is attribute's (is_event_level = false), and the tag is only related to an attribute
we would like to add it to tag_set_ids. In any other case, we won't add the tag.
Args:
attribute_dict (dict): The dictionary that includes the tag list.
is_event_level (bool): Whether the attribute_dict was received from an event object,
meaning the tags are event's ones. Otherwise, the data is attribute's (attribute tags).
"""
output = []
tag_set_ids = set()
tags_list = attribute_dict.get('Tag', [])
for tag in tags_list:
is_event_tag = tag.get('inherited', 0) # field doesn't exist when this is an attribute level, default is '0'
tag_id = tag.get('id')
if is_event_level:
tag_set_ids.add(tag_id)
else: # attribute level
if not is_event_tag:
tag_set_ids.add(tag_id)
output.append({'ID': tag_id, 'Name': tag.get('name')})
return output, tag_set_ids
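# Illustration: on an attribute-level call (is_event_level=False), a tag such as
# {'id': '42', 'name': 'tlp:green', 'inherited': 1} is kept in the output list
# but, being an event tag, its id is not added to tag_set_ids.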
def parse_response_reputation_command(misp_response, malicious_tag_ids, suspicious_tag_ids, attributes_limit):
"""
After getting all the attributes which match the required indicator value, this function parses the response.
    This function goes over all the attributes that were found (after limiting the number of attributes to the given limit)
    and, via sub-functions, calculates the score of the indicator.
For the context data outputs, for every attribute we remove the "Related Attribute" list and limits the tags and
galaxies lists. Eventually, the outputs will be a list of attributes along with their events objects.
    Note: When limiting the attributes amount, we sort the attributes list by event id, as greater event ids are
    the newer ones.
Returns:
response (dict): The parsed outputs to context data (array of attributes).
score: the indicator score
        found_tag: the tag (id) which gave the indicator that score
found_related_events (dict): contains info (name, id, threat level id) about all the events that include
the indicator
Please see an example for a response in test_data/reputation_command_response.json
Please see an example for a parsed output in test_data/reputation_command_outputs.json
"""
response = copy.deepcopy(misp_response)
attributes_list = response.get('Attribute')
if not attributes_list:
return None
attributes_list = sorted(attributes_list,
key=lambda attribute_item: attribute_item['event_id'], reverse=True)[:attributes_limit]
found_related_events, attributes_tag_ids, event_tag_ids = prepare_attributes_array_to_context_data(attributes_list)
attribute_in_event_with_bad_threat_level = found_event_with_bad_threat_level_id(found_related_events)
score, found_tag = get_score(attribute_tags_ids=attributes_tag_ids, event_tags_ids=event_tag_ids,
malicious_tag_ids=malicious_tag_ids, suspicious_tag_ids=suspicious_tag_ids,
is_attribute_in_event_with_bad_threat_level=attribute_in_event_with_bad_threat_level)
formatted_response = replace_keys_from_misp_to_context_data({'Attribute': attributes_list})
return formatted_response, score, found_tag, found_related_events
def get_score(attribute_tags_ids, event_tags_ids, malicious_tag_ids, suspicious_tag_ids,
is_attribute_in_event_with_bad_threat_level):
"""
    Calculates the indicator score by the following logic. Indicators of attributes and events that:
    * have tags which are configured as malicious will be scored 3 (i.e. malicious).
    * have tags which are configured as suspicious will be scored 2 (i.e. suspicious).
    * don't have any tags configured as suspicious or malicious will be scored by their event's threat level id. In
    that case, the score will be BAD if the threat level id is in [1,2,3]. Otherwise, the threat level is 4 = Unknown.
    note:
        - In case the same tag appears in both the malicious tag ids and the suspicious tag ids lists, the indicator will
        be scored as malicious.
        - Attribute tags (both malicious and suspicious) are stronger than event tags.
"""
found_tag = None
is_attribute_tag_malicious = any((found_tag := tag) in attribute_tags_ids for tag in malicious_tag_ids)
if is_attribute_tag_malicious:
return Common.DBotScore.BAD, found_tag
is_attribute_tag_suspicious = any((found_tag := tag) in attribute_tags_ids for tag in suspicious_tag_ids)
if is_attribute_tag_suspicious:
return Common.DBotScore.SUSPICIOUS, found_tag
is_event_tag_malicious = any((found_tag := tag) in event_tags_ids for tag in malicious_tag_ids)
if is_event_tag_malicious:
return Common.DBotScore.BAD, found_tag
is_event_tag_suspicious = any((found_tag := tag) in event_tags_ids for tag in suspicious_tag_ids)
if is_event_tag_suspicious:
return Common.DBotScore.SUSPICIOUS, found_tag
# no tag was found
if is_attribute_in_event_with_bad_threat_level:
return Common.DBotScore.BAD, None
return Common.DBotScore.NONE, None
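# Illustration (hypothetical ids): with malicious_tag_ids={'7'} and
# suspicious_tag_ids={'7', '9'}, an attribute tagged '7' scores BAD: the
# malicious list wins, and attribute-level tags outrank event-level ones.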
def get_new_misp_event_object(args):
"""
Create a new MISP event object and set the event's details.
"""
event = MISPEvent()
event.distribution = MISP_DISTRIBUTION_TO_IDS[args.get('distribution')]
threat_level_id_arg = args.get('threat_level_id')
if threat_level_id_arg:
event.threat_level_id = THREAT_LEVELS_TO_ID[threat_level_id_arg]
analysis_arg = args.get('analysis')
event.analysis = MISP_ANALYSIS_TO_IDS.get(analysis_arg) if analysis_arg in MISP_ANALYSIS_TO_IDS else analysis_arg
event.info = args.get('info') if args.get('info') else 'Event from XSOAR'
event.date = datetime.today()
event.published = argToBoolean(args.get('published', 'False'))
return event
def create_event_command(demisto_args: dict):
"""Creating event in MISP with the given attribute args"""
new_event = get_new_misp_event_object(demisto_args)
new_event = PYMISP.add_event(new_event, True)
if isinstance(new_event, dict) and new_event.get('errors'):
raise DemistoException(new_event.get('errors'))
event_id = new_event.id
add_attribute(event_id=event_id, internal=True, new_event=new_event, demisto_args=demisto_args)
event = PYMISP.search(eventid=event_id)
human_readable = f"## MISP create event\nNew event with ID: {event_id} has been successfully created.\n"
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(event),
raw_response=event
)
def add_attribute(event_id: int = None, internal: bool = False, demisto_args: dict = {}, new_event: MISPEvent = None):
"""Adding attribute to a given MISP event object
    This function can be called as an independent command or as part of another command (create event for example)
Args:
event_id (int): Event ID to add attribute to
internal (bool): if set to True, will not post results to Demisto
demisto_args (dict): Demisto args
        new_event (MISPEvent): When this function is called from the create event command, the attribute will be added to
        that existing event.
"""
attributes_args = {
'id': demisto_args.get('event_id'), # misp event id
'type': demisto_args.get('type', 'other'),
'category': demisto_args.get('category', 'External analysis'),
'to_ids': argToBoolean(demisto_args.get('to_ids', True)),
'comment': demisto_args.get('comment'),
'value': demisto_args.get('value')
}
event_id = event_id if event_id else arg_to_number(demisto_args.get('event_id'), "event_id")
attributes_args.update({'id': event_id}) if event_id else None
distribution = demisto_args.get('distribution')
attributes_args.update({'distribution': MISP_DISTRIBUTION_TO_IDS[distribution]}) if distribution else None
if not new_event:
response = PYMISP.search(eventid=event_id, pythonify=True)
if not response:
raise DemistoException(
f"Error: An event with the given id: {event_id} was not found in MISP. please check it once again")
new_event = response[0] # response[0] is MISP event
new_event.add_attribute(**attributes_args)
PYMISP.update_event(event=new_event)
if internal:
return
value = attributes_args.get('value')
updated_event = PYMISP.search(eventid=new_event.id, controller='attributes', value=value)
human_readable = f"## MISP add attribute\nNew attribute: {value} was added to event id {new_event.id}.\n"
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(updated_event),
raw_response=updated_event
)
def get_indicator_results(value, dbot_type, malicious_tag_ids, suspicious_tag_ids, reliability, attributes_limit):
"""
    This function searches for the given attribute value in MISP and then calculates its dbot score.
    The score is calculated by the tag ids (attribute tags and event tags).
Args:
value (str): The indicator value (an IP address, email address, domain, url or file hash).
dbot_type (str): Indicator type (file, url, domain, email or ip).
        malicious_tag_ids (set): Tag ids that should be recognised as malicious.
        suspicious_tag_ids (set): Tag ids that should be recognised as suspicious.
reliability (DBotScoreReliability): integration reliability score.
attributes_limit (int) : Limits the number of attributes that will be written to the context
Returns:
CommandResults includes all the indicator results.
"""
reputation_value_validation(value, dbot_type)
misp_response = PYMISP.search(value=value, controller='attributes', include_context=True,
include_correlations=True, include_event_tags=True, enforce_warninglist=True,
include_decay_score=True, includeSightings=True)
indicator_type = INDICATOR_TYPE_TO_DBOT_SCORE[dbot_type]
is_indicator_found = misp_response and misp_response.get('Attribute')
if is_indicator_found:
outputs, score, found_tag, found_related_events = parse_response_reputation_command(misp_response,
malicious_tag_ids,
suspicious_tag_ids,
attributes_limit)
dbot = Common.DBotScore(indicator=value, indicator_type=indicator_type,
score=score, reliability=reliability, malicious_description="Match found in MISP")
indicator = get_dbot_indicator(dbot_type, dbot, value)
all_attributes = outputs.get('Attribute')
events_to_human_readable = get_events_related_to_scored_tag(all_attributes, found_tag)
attribute_highlights = reputation_command_to_human_readable(all_attributes, score, events_to_human_readable)
readable_output = tableToMarkdown(f'Results found in MISP for value: {value}', attribute_highlights,
removeNull=True)
readable_output += tableToMarkdown('Related events', list(found_related_events.values()))
return CommandResults(indicator=indicator,
raw_response=misp_response,
outputs=all_attributes,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
readable_output=readable_output)
else:
dbot = Common.DBotScore(indicator=value, indicator_type=indicator_type,
score=Common.DBotScore.NONE, reliability=reliability,
malicious_description="No results were found in MISP")
indicator = get_dbot_indicator(dbot_type, dbot, value)
return CommandResults(indicator=indicator,
readable_output=f"No attributes found in MISP for value: {value}")
def get_events_related_to_scored_tag(all_attributes, found_tag):
"""
    This function searches for all the events that have the tag (i.e. found_tag) which caused the indicator to be scored
as malicious or suspicious.
Args:
all_attributes (dict): The parsed response from the MISP search attribute request
found_tag (str): The tag that was scored as malicious or suspicious. If no tag was found, then the score is
Unknown so no events should be found.
Returns:
list includes all the events that were detected as related to the tag.
"""
scored_events = []
if found_tag:
for attribute in all_attributes:
event = attribute.get('Event', {})
event_name = event.get('Info')
scored_events.extend(search_events_with_scored_tag(event, found_tag, event_name))
scored_events.extend(search_events_with_scored_tag(attribute, found_tag, event_name))
return remove_duplicated_related_events(scored_events)
def search_events_with_scored_tag(object_data_dict, found_tag, event_name):
"""
    Given an object, we go over all its tags and check whether found_tag is one of them. If so, the event will be
    added to the related_events list.
Args:
object_data_dict (dict): Event or attribute dict which includes tags list.
found_tag (str): The tag that was scored as malicious or suspicious.
event_name (str): Name of the event
"""
related_events = []
object_tags_list = object_data_dict.get('Tag', [])
for tag in object_tags_list:
if tag.get('ID') == found_tag:
event_id = get_event_id(object_data_dict)
tag_name = tag.get('Name')
related_events.append({'Event_ID': event_id, 'Event_Name': event_name,
'Tag_Name': tag_name, 'Tag_ID': tag.get('ID')})
return related_events
def build_misp_complex_filter(demisto_query: str):
"""
Examples are available in UT: test_build_misp_complex_filter.
For more information please see build_complex_query in pymisp/api.py
Args:
        demisto_query: complex query containing the reserved words: 'AND:', 'OR:' and 'NOT:',
            using ',' as a delimiter for parameters and ';' as a delimiter for operators.
            Using the operators is optional.
            If 'demisto_query' does not contain any of the complex operators, the original
            input will be returned.
Returns:
str: dictionary created for misp to perform complex query
or if no complex query found returns the original input
"""
regex_and = r'(AND:)([^\;]+)(;)?'
regex_or = r'(OR:)([^\;]+)(;)?'
regex_not = r'(NOT:)([^\;]+)(;)?'
misp_query_params = dict()
match_and = re.search(regex_and, demisto_query, re.MULTILINE)
match_or = re.search(regex_or, demisto_query, re.MULTILINE)
match_not = re.search(regex_not, demisto_query, re.MULTILINE)
is_complex_and_operator = is_misp_complex_search_helper(match_and, misp_query_params, 'and_parameters')
is_complex_or_operator = is_misp_complex_search_helper(match_or, misp_query_params, 'or_parameters')
is_complex_not_operator = is_misp_complex_search_helper(match_not, misp_query_params, 'not_parameters')
is_complex_search = is_complex_and_operator or is_complex_or_operator or is_complex_not_operator
if is_complex_search:
return PYMISP.build_complex_query(**misp_query_params)
return demisto_query
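# Example (hypothetical input): for 'AND:tag1,tag2;NOT:tag3', the regexes above
# capture 'tag1,tag2' and 'tag3'; the (elided) is_misp_complex_search_helper
# presumably splits each match on ',' so misp_query_params becomes
# {'and_parameters': ['tag1', 'tag2'], 'not_parameters': ['tag3']}, which is
# handed to PYMISP.build_complex_query. Input with none of the markers is
# returned unchanged.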
def build_attributes_search_response(response: Union[dict, requests.Response],
include_correlations=False) -> dict:
"""
Convert the response of attribute search returned from MISP to the context output format.
"""
response_object = copy.deepcopy(response)
if include_correlations:
# return full related attributes only if the user wants to get them back
ATTRIBUTE_FIELDS.append('RelatedAttribute')
if isinstance(response_object, str):
response_object = json.loads(json.dumps(response_object))
attributes = response_object.get('Attribute')
return get_limit_attribute_search_outputs(attributes)
def build_galaxy_output(given_object):
"""given_object is attribute or event, depends on the called function"""
if given_object.get('Galaxy'):
given_object['Galaxy'] = [
{
'name': star.get('name'),
'type': star.get('type'),
'description': star.get('description')
} for star in given_object['Galaxy']
]
def build_tag_output(given_object):
"""given_object is attribute or event, depends on the called function"""
if given_object.get('Tag'):
given_object['Tag'] = [
{'Name': tag.get('name'),
'is_galaxy': tag.get('is_galaxy')
} for tag in given_object.get('Tag')
]
def build_attributes_search_response_return_only_values(response_object: Union[dict, requests.Response]) -> list:
"""returns list of attributes' values that match the search query when user set the arg 'compact' to True"""
if isinstance(response_object, str):
response_object = json.loads(json.dumps(response_object))
attributes = response_object.get('Attribute')
return [attribute.get('value') for attribute in attributes]
def search_attributes(demisto_args: dict) -> CommandResults:
"""Execute a MISP search over 'attributes'"""
args = prepare_args_to_search('attributes')
outputs_should_include_only_values = argToBoolean(demisto_args.get('compact', False))
include_correlations = argToBoolean(demisto_args.get('include_correlations', False))
page = arg_to_number(demisto_args.get('page', 1), "page", required=True)
limit = arg_to_number(demisto_args.get('limit', 50), "limit", required=True)
pagination_args_validation(page, limit)
response = PYMISP.search(**args)
if response:
if outputs_should_include_only_values:
response_for_context = build_attributes_search_response_return_only_values(response)
number_of_results = len(response_for_context)
md = tableToMarkdown(f"MISP search-attributes returned {number_of_results} attributes",
response_for_context[:number_of_results], ["Value"])
else:
response_for_context = build_attributes_search_response(response, include_correlations)
attribute_highlights = attribute_response_to_markdown_table(response_for_context)
pagination_message = f"Current page size: {limit}\n"
if len(response_for_context) == limit:
pagination_message += f"Showing page {page} out others that may exist"
else:
pagination_message += f"Showing page {page}"
md = tableToMarkdown(
f"MISP search-attributes returned {len(response_for_context)} attributes\n {pagination_message}",
attribute_highlights, removeNull=True)
return CommandResults(
raw_response=response,
readable_output=md,
outputs=response_for_context,
outputs_prefix="MISP.Attribute",
outputs_key_field="ID"
)
else:
return CommandResults(readable_output=f"No attributes found in MISP for the given filters: {args}")
def build_events_search_response(response: Union[dict, requests.Response]) -> dict:
"""
Convert the response of event search returned from MISP to the context output format.
please note: attributes are excluded from search-events output as the information is too big. User can use the
command search-attributes in order to get the information about the attributes.
"""
response_object = copy.deepcopy(response)
if isinstance(response_object, str):
response_object = json.loads(json.dumps(response_object))
events = [event.get('Event') for event in response_object]
for i in range(0, len(events)):
# Filter object from keys in event_args
events[i] = {key: events[i].get(key) for key in EVENT_FIELDS if key in events[i]}
events[i]['RelatedEvent'] = [] # there is no need in returning related event when searching for an event
build_galaxy_output(events[i])
build_tag_output(events[i])
build_object_output(events[i])
events[i]['timestamp'] = misp_convert_timestamp_to_date_string(events[i].get('timestamp'))
events[i]['publish_timestamp'] = misp_convert_timestamp_to_date_string(events[i].get('publish_timestamp'))
formatted_events = replace_keys_from_misp_to_context_data(events) # type: ignore
return formatted_events # type: ignore
def search_events(demisto_args: dict) -> CommandResults:
"""
Execute a MISP search using the 'event' controller.
"""
args = prepare_args_to_search('events')
page = arg_to_number(demisto_args.get('page', 1), "page", required=True)
limit = arg_to_number(demisto_args.get('limit', 50), "limit", required=True)
pagination_args_validation(page, limit)
response = PYMISP.search(**args)
if response:
response_for_context = build_events_search_response(response)
event_outputs_to_human_readable = event_to_human_readable(response_for_context)
pagination_message = f"Current page size: {limit}\n"
if len(response_for_context) == limit:
            pagination_message += f"Showing page {page} out of others that may exist"
else:
pagination_message += f"Showing page {page}"
md = tableToMarkdown(
f"MISP search-events returned {len(response_for_context)} events.\n {pagination_message}",
event_outputs_to_human_readable, removeNull=True)
return CommandResults(
raw_response=response,
readable_output=md,
outputs=response_for_context,
outputs_prefix="MISP.Event",
outputs_key_field="ID"
)
else:
return CommandResults(readable_output=f"No events found in MISP for the given filters: {args}")
def delete_event(demisto_args: dict):
"""
Gets an event id and deletes it.
"""
event_id = demisto_args.get('event_id')
response = PYMISP.delete_event(event_id)
if 'errors' in response:
        raise DemistoException(f'Event ID: {event_id} was not found in MISP.\nError message: {response}')
else:
human_readable = f'Event {event_id} has been deleted'
return CommandResults(readable_output=human_readable, raw_response=response)
def add_tag(demisto_args: dict, is_attribute=False):
"""
    Adds a tag to the given UUID of an event or attribute.
    is_attribute (bool): whether the given UUID belongs to an attribute (True) or an event (False).
"""
uuid = demisto_args.get('uuid')
tag = demisto_args.get('tag')
try:
PYMISP.tag(uuid, tag) # add the tag
except PyMISPError:
raise DemistoException("Adding the required tag was failed. Please make sure the UUID exists.")
if is_attribute:
response = PYMISP.search(uuid=uuid, controller='attributes')
human_readable = f'Tag {tag} has been successfully added to attribute {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(response),
raw_response=response
)
# event's uuid
response = PYMISP.search(uuid=uuid)
human_readable = f'Tag {tag} has been successfully added to event {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(response),
raw_response=response
)
def remove_tag(demisto_args: dict, is_attribute=False):
"""
    Removes a tag from the given UUID of an event or attribute.
    is_attribute (bool): whether the given UUID belongs to an attribute (True) or an event (False).
"""
uuid = demisto_args.get('uuid')
tag = demisto_args.get('tag')
try:
response = PYMISP.untag(uuid, tag)
if response and response.get('errors'):
raise DemistoException(f'Error in `{demisto.command()}` command: {response}')
except PyMISPError:
raise DemistoException("Removing the required tag was failed. Please make sure the UUID and tag exist.")
if is_attribute:
response = PYMISP.search(uuid=uuid, controller='attributes')
human_readable = f'Tag {tag} has been successfully removed from the attribute {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Attribute',
outputs_key_field='ID',
outputs=build_attributes_search_response(response),
raw_response=response
)
# event's uuid
response = PYMISP.search(uuid=uuid)
human_readable = f'Tag {tag} has been successfully removed from the event {uuid}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=build_events_search_response(response),
raw_response=response
)
def add_sighting(demisto_args: dict):
"""Adds sighting to MISP attribute
"""
attribute_id = demisto_args.get('id')
attribute_uuid = demisto_args.get('uuid')
sighting_type = demisto_args['type'] # mandatory arg
att_id = attribute_id or attribute_uuid
if not att_id:
raise DemistoException('ID or UUID not specified')
sighting_args = {
'id': attribute_id,
'uuid': attribute_uuid,
'type': SIGHTING_TYPE_NAME_TO_ID[sighting_type]
}
sigh_obj = MISPSighting()
sigh_obj.from_dict(**sighting_args)
response = PYMISP.add_sighting(sigh_obj, att_id)
if response.get('message'):
raise DemistoException(f"An error was occurred: {response.get('message')}")
elif response.get('Sighting'):
human_readable = f'Sighting \'{sighting_type}\' has been successfully added to attribute {att_id}'
return CommandResults(readable_output=human_readable)
raise DemistoException(f"An error was occurred: {json.dumps(response)}")
def test(malicious_tag_ids, suspicious_tag_ids, attributes_limit):
"""
Test module.
"""
is_tag_list_valid(malicious_tag_ids)
is_tag_list_valid(suspicious_tag_ids)
if attributes_limit < 0:
raise DemistoException('Attribute limit has to be a positive number.')
response = PYMISP._prepare_request('GET', 'servers/getPyMISPVersion.json')
if PYMISP._check_json_response(response):
return 'ok'
else:
        raise DemistoException('Could not connect to MISP.')
def add_events_from_feed(demisto_args: dict, use_ssl: bool, proxies: dict):
"""Gets an OSINT feed from url and publishing them to MISP
urls with feeds for example: https://www.misp-project.org/feeds/
feed format must be MISP.
"""
headers = {'Accept': 'application/json'}
url = build_feed_url(demisto_args)
osint_url = f'{url}/manifest.json'
limit = arg_to_number(demisto_args.get('limit', 2), "limit", required=True)
try:
uri_list = requests.get(osint_url, verify=use_ssl, headers=headers, proxies=proxies).json()
events_ids = list() # type: List[Dict[str, int]]
for index, uri in enumerate(uri_list, 1):
response = requests.get(f'{url}/{uri}.json', verify=use_ssl, headers=headers, proxies=proxies).json()
misp_new_event = MISPEvent()
misp_new_event.load(response)
add_event_response = PYMISP.add_event(misp_new_event)
event_object = add_event_response.get('Event')
if event_object and 'id' in event_object:
events_ids.append({'ID': event_object['id']})
if limit == len(events_ids):
break
        human_readable = tableToMarkdown(f'A total of {len(events_ids)} events were added to MISP.', events_ids)
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=events_ids,
)
except ValueError as e:
raise DemistoException(f'URL [{url}] is not a valid MISP feed. error: {e}')
def add_object(event_id: str, obj: MISPObject):
"""Sending object to MISP and returning outputs
Args:
obj: object to add to MISP
event_id: ID of event
"""
response = PYMISP.add_object(event_id, misp_object=obj)
if 'errors' in response:
raise DemistoException(f'Error in `{demisto.command()}` command: {response}')
for ref in obj.ObjectReference:
response = PYMISP.add_object_reference(ref)
for attribute in response.get('Object', {}).get('Attribute', []):
convert_timestamp_to_readable(attribute, None)
response['Object']['timestamp'] = misp_convert_timestamp_to_date_string(response.get('Object', {}).get('timestamp'))
formatted_response = replace_keys_from_misp_to_context_data(response)
formatted_response.update({"ID": event_id})
human_readable = f'Object has been added to MISP event ID {event_id}'
return CommandResults(
readable_output=human_readable,
outputs_prefix='MISP.Event',
outputs_key_field='ID',
outputs=formatted_response,
)
def add_domain_object(demisto_args: dict):
"""Adds a domain object to MISP
domain-ip description: https://www.misp-project.org/objects.html#_domain_ip
"""
text = demisto_args.get('text')
event_id = demisto_args.get('event_id')
domain = demisto_args.get('name')
obj = MISPObject('domain-ip')
ips = argToList(demisto_args.get('ip'))
for ip in ips:
obj.add_attribute('ip', value=ip)
obj.add_attribute('domain', value=domain)
if text:
obj.add_attribute('text', value=text)
return add_object(event_id, obj)
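# Example args sketch (editor's addition; the values are hypothetical): this
# command reads 'event_id', 'name', a comma-separated 'ip' list, and an
# optional 'text' note from demisto_args.
# add_domain_object({'event_id': '42', 'name': 'example.com', 'ip': '198.51.100.1,198.51.100.2'})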
def add_url_object(demisto_args: dict):
"""Building url object in MISP scheme
Scheme described https://www.misp-project.org/objects.html#_url
"""
url_args = [
'text',
'last_seen',
'first_seen'
]
event_id = demisto_args.get('event_id')
url = demisto_args.get('url')
url_parse = urlparse(url)
url_obj = [{'url': url}]
    # plain conditionals instead of expression-statements; note that the original
    # list.extend() call with a dict for 'scheme' would have appended only the dict's keys
    if url_parse.scheme:
        url_obj.append({'scheme': url_parse.scheme})
    if url_parse.path:
        url_obj.append({'resource_path': url_parse.path})
    if url_parse.query:
        url_obj.append({'query_string': url_parse.query})
    if url_parse.netloc:
        url_obj.append({'domain': url_parse.netloc})
    if url_parse.fragment:
        url_obj.append({'fragment': url_parse.fragment})
    if url_parse.port:
        url_obj.append({'port': url_parse.port})
    if url_parse.username and url_parse.password:
        url_obj.append({'credential': (url_parse.username, url_parse.password)})
url_obj.extend(convert_arg_to_misp_args(demisto_args, url_args))
g_object = build_generic_object('url', url_obj)
return add_object(event_id, g_object)
def handle_tag_duplication_ids(malicious_tag_ids, suspicious_tag_ids):
"""
    Gets 2 sets of tag ids. If an id exists in both sets, it is removed from the
    suspicious tag ids set and kept only in the malicious one (a tag that was configured
    as malicious is stronger than one recognised as suspicious).
"""
common_ids = set(malicious_tag_ids) & set(suspicious_tag_ids)
suspicious_tag_ids = {tag_id for tag_id in suspicious_tag_ids if tag_id not in common_ids}
return malicious_tag_ids, suspicious_tag_ids
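# Worked example (editor's addition): with malicious ids {1, 2} and suspicious
# ids {2, 3}, the shared id 2 is dropped from the suspicious set, returning
# ({1, 2}, {3}).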
def is_tag_list_valid(tag_ids):
"""Gets a list ot tag ids (each one is str), and verify all the tags are valid positive integers."""
for tag in tag_ids:
try:
tag = int(tag)
if tag <= 0:
raise DemistoException(f"Tag id has to be a positive integer, please change the given: '{tag}' id.")
except ValueError:
raise DemistoException(f"Tag id has to be a positive integer, please change the given: '{tag}' id.")
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
| 41.869767 | 120 | 0.674295 |
867722c3f684f02eb3e24dffeab626e5a7b8bb2c | 19,936 | py | Python | pycle/bicycle-scrapes/epey-scrape/downLink5.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | [
"MIT"
] | null | null | null | pycle/bicycle-scrapes/epey-scrape/downLink5.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | [
"MIT"
] | null | null | null | pycle/bicycle-scrapes/epey-scrape/downLink5.py | fusuyfusuy/School-Projects | 8e38f19da90f63ac9c9ec91e550fc5aaab3d0234 | [
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import os
import wget
from urllib.request import Request, urlopen
bicycles=[{'name': 'Kron XC150 27.5 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc150-27-5-hd.html'}, {'name': 'Corelli Trivor 3 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-trivor-3-0.html'}, {'name': 'Salcano Hector 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-hector-26-v.html'}, {'name': 'Corelli Atrox 3.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-atrox-3-2.html'}, {'name': 'Mosso WildFire LTD HYD 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-wildfire-hyd-27-5.html'}, {'name': 'Corelli Via 1.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-via-1-2.html'}, {'name': 'Kron FD 1000 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-fd-1000.html'}, {'name': 'Bisan CTS 5200 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bisan-cts-5200.html'}, {'name': 'Kron XC100 26 MD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc100-26-md.html'}, {'name': 'Bisan SPX-3250 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bisan-spx-3250.html'}, {'name': 'Kron RC1000 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-rc1000.html'}, {'name': 'Carraro E-Viva Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-e-viva.html'}, {'name': 'Kron Ares 4.0 26 MD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-ares-4-0-26-md.html'}, {'name': 'Carraro Monster 16 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-monster-16.html'}, {'name': 'Salcano Helen 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-helen-26.html'}, {'name': 'Bianchi RCX 527 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-rcx-527-27-5.html'}, {'name': 'RKS TNT5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/rks-tnt5.html'}, {'name': 'Corelli Via Lady 1.1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-via-lady-1-1.html'}, {'name': 'Corelli Snoop 3.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-snoop-3-0.html'}, {'name': 'Corelli Dolce 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-dolce-2-0.html'}, {'name': 'Corelli Neon 2.1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-neon-2-1.html'}, {'name': 'Kron CX100 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-cx100-man.html'}, {'name': 'Bianchi Aspid 27 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-aspid-27.html'}, {'name': 'Salcano zmir Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-izmir.html'}, {'name': 'mit 2610 Alanya Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2610-alanya.html'}, {'name': 'Kross Trans 5.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kross-trans-5-0.html'}, {'name': 'Kron ETX500 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-etx500.html'}, {'name': 'Salcano Attack 14 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-attack-14.html'}, {'name': 'Corelli Banner Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-banner.html'}, {'name': 'Corelli Voras 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-voras-1-0.html'}, {'name': 'Peugeot JM244 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-jm244.html'}, {'name': 'Corelli Smile 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-smile-20.html'}, {'name': 'Carraro Buffalo 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-buffalo-20.html'}, {'name': 'Carraro Elite 804 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-elite-804.html'}, {'name': 'mit 1605 Little Pony Bisiklet', 'link': 
'https://www.epey.com/bisiklet/umit-1605-little-pony.html'}, {'name': 'mit 2400 Colorado Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2400-colorado.html'}, {'name': 'Kron CX50 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-cx50-26-v.html'}, {'name': 'Corelli Beauty 2.1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-beauty-2-1.html'}, {'name': 'Corelli Snoop 2.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-snoop-2-2.html'}, {'name': 'Corelli Evol 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-evol-2-0.html'}, {'name': 'Salcano Excel 24 Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-excel-24-lady.html'}, {'name': 'Corelli Apenin 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-apenin-1-0.html'}, {'name': 'Orbis Voltage 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-voltage-26.html'}, {'name': 'Mosso Groovy 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-groovy-29.html'}, {'name': 'Bianchi Aspid 36 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-aspid-36.html'}, {'name': 'mit 2864 Magnetic V Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2864-magnetic-v-lady.html'}, {'name': 'Cannondale F SI AL 3 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-f-si-al-3-27-5.html'}, {'name': 'Salcano Bodrum 26 Man Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-bodrum-26-man.html'}, {'name': 'Bianchi Energy D Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-energy-d-24.html'}, {'name': 'mit 2657 Albatros V Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2657-albatros-v.html'}, {'name': 'mit 2012 Ben10 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2012-ben10.html'}, {'name': 'mit 2002 Z-Trend Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2002-z-trend.html'}, {'name': 'Mosso 29 WildFire LTD V Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-29-wildfire-ltd-v.html'}, {'name': 'Salcano 300 20 MD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-300-20-md.html'}, {'name': 'Salcano City Wind Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-wind-lady-hd.html'}, {'name': 'Salcano NG444 27.5 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng444-27-5-hd.html'}, {'name': 'Carraro Daytona 927 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-daytona-927.html'}, {'name': 'Kron FD2100 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-fd2100.html'}, {'name': 'Kron WRC1000 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-wrc1000.html'}, {'name': 'Vortex 5.0 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/vortex-5-0-27-5.html'}, {'name': 'Kron XC75L 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc75l-20.html'}, {'name': 'Kron Vortex 4.0 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-vortex-4-0-26-v.html'}, {'name': 'Kron Anthea 3.0 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-anthea-3-0-20.html'}, {'name': 'Peugeot T16-28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-t16-28.html'}, {'name': 'Peugeot M15-26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-m15-26.html'}, {'name': 'Daafu SXC 100 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/daafu-sxc-100-20.html'}, {'name': 'Corelli Kickboy 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-kickboy-20.html'}, {'name': 'Peugeot F13 Bisiklet', 'link': 'https://www.epey.com/bisiklet/peugeot-f13.html'}, {'name': 'Carraro Elite 805 
Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-elite-805.html'}, {'name': 'Carraro Force 920 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-force-920.html'}, {'name': 'Berg Jeep Adventure Bisiklet', 'link': 'https://www.epey.com/bisiklet/berg-jeep-adventure.html'}, {'name': 'Berg Buddy Orange Bisiklet', 'link': 'https://www.epey.com/bisiklet/berg-buddy-orange.html'}, {'name': 'mit 2019 Picolo Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2019-picolo.html'}, {'name': 'mit 2833 Ventura Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2833-ventura-lady.html'}, {'name': 'mit 2668 Faster V Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2668-faster-v.html'}, {'name': 'mit 2960 Camaro HYD Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2960-camaro-hyd.html'}, {'name': 'Kron RF100 24 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-rf100-24-v.html'}, {'name': 'Sedona 240 Bisiklet', 'link': 'https://www.epey.com/bisiklet/sedona-240.html'}, {'name': 'Corelli Carmen 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-carmen-1-0.html'}, {'name': 'Corelli Swing 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-swing-2-0.html'}, {'name': 'Corelli Teton 2.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-teton-2-2.html'}, {'name': 'Bianchi Buffalo 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-buffalo-24.html'}, {'name': 'Carraro Juliana 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-juliana-26.html'}, {'name': 'Ghost Kato 5.7 AL Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-kato-5-7-al.html'}, {'name': 'Bianchi Intenso Potenza Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-intenso-potenza.html'}, {'name': 'Salcano mpetus 29 Deore Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-impetus-29-deore.html'}, {'name': 'Salcano NG400 27.5 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng400-27-5-lady-hd.html'}, {'name': 'Salcano NG750 26 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng750-26-lady-hd.html'}, {'name': 'Salcano NG800 24 Lady V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng800-24-lady-v.html'}, {'name': 'Salcano Lion FS Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-lion-fs.html'}, {'name': 'Salcano City Fun 50 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-fun-50-lady-hd.html'}, {'name': 'Salcano Marmaris Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-marmaris.html'}, {'name': 'Salcano NG 800 26 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng-800-26-v.html'}, {'name': 'Corelli Terra 1.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-terra-1-0.html'}, {'name': 'Corelli Adonis 2.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-adonis-2-2.html'}, {'name': 'Corelli Jazz 1.2 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-jazz-1-2.html'}, {'name': 'Corelli Cyborg 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-cyborg-2-0.html'}, {'name': 'Corelli Scopri 2.0 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corelli-scopri-2-0.html'}, {'name': 'Orbis Punkrose 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-punkrose-24.html'}, {'name': 'Orbis Tweety 16 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-tweety-16.html'}, {'name': 'Orbis Crazy 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-crazy-20.html'}, {'name': 'Orbis Cloud 20 Bisiklet', 'link': 
'https://www.epey.com/bisiklet/orbis-cloud-20.html'}, {'name': 'Orbis Dynamic 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-dynamic-24.html'}, {'name': 'Orbis Escape 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/orbis-escape-24.html'}, {'name': 'Tern Verge S27H Bisiklet', 'link': 'https://www.epey.com/bisiklet/tern-verge-s27h.html'}, {'name': 'Dahon Briza D8 Bisiklet', 'link': 'https://www.epey.com/bisiklet/dahon-briza-d8.html'}, {'name': 'Kron XC100 24 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc100-24-man-v.html'}, {'name': 'Kron TX150L Lady V Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-tx150-lady-v.html'}, {'name': 'Kron XC450 27.5 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc450-27-5-man-hd.html'}, {'name': 'Whistle Guipago 1830 Bisiklet', 'link': 'https://www.epey.com/bisiklet/whistle-guipago-1830.html'}, {'name': 'Mosso 20 WildFire V Boys Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-20-wildfire-v-boys.html'}, {'name': 'Mosso City Life Nexus Man Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-city-life-nexus-man.html'}, {'name': 'Mosso 771TB3 DMD Acera Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-771tb3-dmd-acera.html'}, {'name': 'Mosso 735TCA 105 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-735tca-105.html'}, {'name': 'Mosso Groovy 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/mosso-groovy-27-5.html'}, {'name': 'Ghost Kato 4 Kid 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-kato-4-kid-24.html'}, {'name': 'Ghost Kato 2 Kid 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-kato-2-kid-20.html'}, {'name': 'Ghost Lawu 2 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/ghost-lawu-2-26.html'}, {'name': 'Carraro Daytona 2924 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-daytona-2924.html'}, {'name': 'Carraro Flexi 103 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-flexi-103.html'}, {'name': 'Carraro Sngerbob 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-sungerbob-20.html'}, {'name': 'Bianchi Bella 24 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-bella-24.html'}, {'name': 'Bianchi RCX 237 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-rcx-237.html'}, {'name': 'Bianchi Touring 411 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-touring-411.html'}, {'name': 'Salcano Sarajevo 26 Lady Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-sarajevo-26-lady.html'}, {'name': 'Salcano NG450 26 Lady HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng450-26-lady-hd.html'}, {'name': 'Salcano City Sport 40 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-sport-40-v.html'}, {'name': 'mit 2049 Monster High Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2049-monster-high.html'}, {'name': 'Cube Reaction GTC Race 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-reaction-gtc-race-27-5.html'}, {'name': 'Arbike 2901 Bisiklet', 'link': 'https://www.epey.com/bisiklet/arbike-2901.html'}, {'name': 'Arbike 2606 26 in Bisiklet', 'link': 'https://www.epey.com/bisiklet/arbike-2606.html'}, {'name': 'Salcano NG350 29 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng350-hd-29.html'}, {'name': 'Salcano NG750 24 Lady V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-ng750-lady-24.html'}, {'name': 'Cube Delhi Pro Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-delhi-pro.html'}, {'name': 'Cube Attain Race Bisiklet', 'link': 
'https://www.epey.com/bisiklet/cube-attain-race.html'}, {'name': 'Cube Attain GTC SL Disk Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-attain-gtc-sl-disk.html'}, {'name': 'Cube Acid 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-acid-27-5.html'}, {'name': 'Cube Agree C:62 SL Bisiklet', 'link': 'https://www.epey.com/bisiklet/cube-agree-c62-sl.html'}, {'name': 'Merida BIG.NINE XT Edition 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/merida-big-nine-xt-edition-29.html'}, {'name': 'Merida BIG.SEVEN 1000 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/merida-big-seven-1000-27-5.html'}, {'name': 'Trek Superfly 5 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/trek-superfly-5-29.html'}, {'name': 'Geotech Manic Carbon 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/geotech-manic-carbon-29.html'}, {'name': 'Corratec Superbow Fun 29ER 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corratec-superbow-fun-29er-29.html'}, {'name': 'Corratec Dolomiti Sora 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/corratec-dolomiti-sora-28.html'}, {'name': 'Cannondale Supersix Evo Ultegra Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-supersix-evo-ultegra-4-28.html'}, {'name': 'Cannondale Bad Boy 4 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-bad-boy-4-28.html'}, {'name': 'Cannondale Trail Womens 5 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/cannondale-trail-womens-5-27-5.html'}, {'name': 'Schwinn Searcher 3 Men 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/schwinn-searcher-3-men-28.html'}, {'name': 'Geotech Path XC 4.4 20. Yil zel Seri 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/geotech-path-xc-4-4-20-yil-ozel-seri-26.html'}, {'name': 'Kron XC250 Lady 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-xc250-lady-26.html'}, {'name': 'Kron TX150 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/kron-tx150-hd.html'}, {'name': 'Salcano Igman 27.5 Deore Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-igman-deore-27-5.html'}, {'name': 'Salcano Astro 29 V Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-astro-v-29.html'}, {'name': 'Salcano City Wings 20 HD Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-city-wings-20-hd.html'}, {'name': 'Salcano XRS050 Claris Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-xrs050-claris.html'}, {'name': 'Salcano Tracker 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-tracker-20.html'}, {'name': 'Salcano Cappadocia Steel Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-cappadocia-steel.html'}, {'name': 'Salcano Assos 20 29 X1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-assos-20-x1-29.html'}, {'name': 'Salcano Assos 10 29 X1 Bisiklet', 'link': 'https://www.epey.com/bisiklet/salcano-assos-10-x1-29.html'}, {'name': 'Scott Contessa 640 Bisiklet', 'link': 'https://www.epey.com/bisiklet/scott-contessa-640-26.html'}, {'name': 'Tern Link B7 Bisiklet', 'link': 'https://www.epey.com/bisiklet/tern-link-b7-20.html'}, {'name': 'Bianchi Honey Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-honey-16.html'}, {'name': 'Bianchi Touring 405 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-touring-405-bayan-28.html'}, {'name': 'Bianchi AFX 7029 29 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-afx-7029-29.html'}, {'name': 'Bianchi RCX 426 Bisiklet', 'link': 'https://www.epey.com/bisiklet/bianchi-rcx-426-26.html'}, {'name': 'Bianchi Nitro Bisiklet', 'link': 
'https://www.epey.com/bisiklet/bianchi-nitro-24.html'}, {'name': 'Carraro Sportive 327 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-sportive-327-28.html'}, {'name': 'Carraro Street 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-street-26.html'}, {'name': 'Carraro Big 629 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-big-629-29.html'}, {'name': 'Carraro Crs 620 26 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-crs-620-26.html'}, {'name': 'Sedona Black Code 8 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/sedona-black-code-8-27-5.html'}, {'name': 'Coranna 2491 Castor Bisiklet', 'link': 'https://www.epey.com/bisiklet/coranna-2491-castor.html'}, {'name': "mit 2842 City's Bisiklet", 'link': 'https://www.epey.com/bisiklet/umit-2842-citys-2842-citys.html'}, {'name': 'mit 2411 Rideon Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2411-rideon.html'}, {'name': 'mit 2056 Accrue 2D 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2056-accrue-2d-20.html'}, {'name': 'mit 1671 Superbomber 16 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-1671-superbomber-16.html'}, {'name': 'mit 2802 Taurus Man 28 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2802-taurus-man-28.html'}, {'name': 'mit 2053 Thunder 20 Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2053-thunder-20.html'}, {'name': 'mit 2965 Mirage V Bisiklet', 'link': 'https://www.epey.com/bisiklet/umit-2965-mirage-v.html'}, {'name': 'Gitane Fast Bisiklet', 'link': 'https://www.epey.com/bisiklet/gitane-fast.html'}, {'name': 'Carraro Kifuka 27.5 Bisiklet', 'link': 'https://www.epey.com/bisiklet/carraro-kifuka-27-5.html'}]
for i in bicycles:
url = i['link']
try:
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
webpage = urlopen(req).read()
    except Exception:  # avoid a bare except, which would also swallow KeyboardInterrupt
print("err in "+i['link'])
    else:
        print("Downloaded "+i['name']+" ", end="\r")
        fileName = i['name'].replace('/', '_')
        # use a context manager; the original `f.close` referenced the method
        # without calling it, so the file handle was never explicitly closed
        with open("./listItems/"+fileName+'.html', 'wb') as f:
            f.write(webpage)
| 906.181818 | 19,356 | 0.688353 |
86789c8feaa8d10751a8b27ad6e7fc323ebc39ff | 956 | py | Python | redmine/__init__.py | hugoseabra/redmine-task-generator | b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf | [
"MIT"
] | null | null | null | redmine/__init__.py | hugoseabra/redmine-task-generator | b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf | [
"MIT"
] | 4 | 2021-03-30T14:04:56.000Z | 2021-06-10T19:40:52.000Z | redmine/__init__.py | hugoseabra/redmine-task-generator | b5ce1764f1c7588a7c82b25f7dd4bf07d1c105cf | [
"MIT"
] | null | null | null | from django.conf import settings
from redminelib import Redmine as DefaultRedmine
from .validator import RedmineInstanceValidator
| 28.117647 | 62 | 0.696653 |
8678e0fdfc11399c75f91f8ec0af910ceb4aab00 | 3,212 | py | Python | python_survey/finished_files/main.py | trenton3983/PyCharmProjects | fae8653a25e07e7384eb0ddf6ea191adeb44face | [
"MIT"
] | null | null | null | python_survey/finished_files/main.py | trenton3983/PyCharmProjects | fae8653a25e07e7384eb0ddf6ea191adeb44face | [
"MIT"
] | null | null | null | python_survey/finished_files/main.py | trenton3983/PyCharmProjects | fae8653a25e07e7384eb0ddf6ea191adeb44face | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
from finished_files.survey_data_dictionary import DATA_DICTIONARY
# Load data
# We want to take the names list from our data dictionary
names = [x.name for x in DATA_DICTIONARY]
# Generate the list of names to import
usecols = [x.name for x in DATA_DICTIONARY if x.usecol]
# dtypes should be a dict of 'col_name' : dtype
dtypes = {x.name : x.dtype for x in DATA_DICTIONARY if x.dtype}
# same for converters
converters = {x.name : x.converter for x in DATA_DICTIONARY if x.converter}
df = pd.read_csv('data/survey.csv',
header=0,
names=names,
dtype=dtypes,
converters=converters,
usecols=usecols)
#%% Clean up data: remove disqualified users
# In the survey, any user who selected they don't use Python was then
# disqualified from the rest of the survey. So let's drop them here.
df = df[df['python_main'] != 'No, I dont use Python for my current projects']  # comparison string must match the answer text exactly as stored in the CSV
# Considering we now only have two categories left:
# - Yes
# - No, I use Python for secondary projects only
# Let's turn it into a bool
df['python_main'] = df['python_main'] == 'Yes'
#%% Plot the web dev / data scientist ratio
# In the survey, respondents were asked to estimate the ratio between
# the amount of web developers vs the amount of data scientists. Afterwards
# they were asked what they thought the most popular answer would be.
# Let's see if there's a difference!
# This is a categorical data point, and it's already ordered in the data
# dictionary. So we shouldn't sort it after counting the values.
ratio_self = df['webdev_science_ratio_self'].value_counts(sort=False)
ratio_others = df['webdev_science_ratio_others'].value_counts(sort=False)
# Let's draw a bar chart comparing the distributions
fig = plt.figure()
ax = fig.add_subplot(111)
RATIO_COUNT = ratio_self.count()
x = np.arange(RATIO_COUNT)
WIDTH = 0.4
self_bars = ax.bar(x-WIDTH, ratio_self, width=WIDTH, color='b', align='center')
others_bars = ax.bar(x, ratio_others, width=WIDTH, color='g', align='center')
ax.set_xlabel('Ratios')
ax.set_ylabel('Observations')
labels = [str(lbl) for lbl in ratio_self.index]
ax.set_xticks(x - 0.5 * WIDTH)
ax.set_xticklabels(labels)
ax.legend((self_bars[0], others_bars[0]),
('Self', 'Most popular'))
plt.show()
#%% Calculate the predicted totals
# Let's recode the ratios to numbers, and calculate the means
CONVERSION = {
'10:1': 10,
'5:1' : 5,
'2:1' : 2,
'1:1' : 1,
'1:2' : 0.5,
'1:5' : 0.2,
'1:10': 0.1
}
self_numeric = df['webdev_science_ratio_self'] \
.replace(CONVERSION.keys(), CONVERSION.values())
others_numeric = df['webdev_science_ratio_others'] \
.replace(CONVERSION.keys(), CONVERSION.values())
print(f'Self:\t\t{self_numeric.mean().round(2)} web devs / scientist')
print(f'Others:\t\t{others_numeric.mean().round(2)} web devs / scientist')
#%% Is the difference statistically significant?
result = scipy.stats.chisquare(ratio_self, ratio_others)
# The null hypothesis is that they're the same. Let's see if we can reject it
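# Editor's addition (a sketch; assumes a scipy version where chisquare returns
# a result object exposing .pvalue): make the reject/fail-to-reject decision
# explicit at a conventional significance level.
ALPHA = 0.05  # significance threshold chosen here purely for illustration
print(f'Reject null at alpha={ALPHA}: {result.pvalue < ALPHA}')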
print(result)
| 31.184466 | 79 | 0.699253 |
8679399f0ab155a65d5523949359b9a4e0752af4 | 4,918 | py | Python | adet/modeling/embedmask/mask_pred.py | yinghdb/AdelaiDet | 94a9b7cde92fb039852f876964d991a1f3e15af4 | [
"BSD-2-Clause"
] | 3 | 2021-05-21T08:02:48.000Z | 2021-11-05T11:06:40.000Z | adet/modeling/embedmask/mask_pred.py | yinghdb/AdelaiDet | 94a9b7cde92fb039852f876964d991a1f3e15af4 | [
"BSD-2-Clause"
] | null | null | null | adet/modeling/embedmask/mask_pred.py | yinghdb/AdelaiDet | 94a9b7cde92fb039852f876964d991a1f3e15af4 | [
"BSD-2-Clause"
] | 1 | 2021-05-24T06:53:32.000Z | 2021-05-24T06:53:32.000Z | import torch
from torch.nn import functional as F
from torch import nn
from torch.autograd import Variable
from adet.utils.comm import compute_locations, aligned_bilinear
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted.float()).cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
def lovasz_hinge(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
signs = 2. * labels.float() - 1.
errors = (1. - logits * Variable(signs))
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
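# Minimal usage sketch (editor's addition, not part of the original module):
# logits = torch.tensor([2.0, -1.5, 0.3])
# labels = torch.tensor([1, 0, 1])
# loss = lovasz_hinge(logits, labels)  # scalar tensor; 0 only when every prediction has the right sign with margin >= 1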
| 36.42963 | 135 | 0.637861 |
86798d0504dd04df9298eafb92e49de14fb4653a | 3,804 | py | Python | cloudferry/actions/prechecks/check_vmax_prerequisites.py | SVilgelm/CloudFerry | 4459c0d21ba7ccffe51176932197b352e426ba63 | [
"Apache-2.0"
] | 6 | 2017-04-20T00:49:49.000Z | 2020-12-20T16:27:10.000Z | cloudferry/actions/prechecks/check_vmax_prerequisites.py | SVilgelm/CloudFerry | 4459c0d21ba7ccffe51176932197b352e426ba63 | [
"Apache-2.0"
] | 3 | 2017-04-08T15:47:16.000Z | 2017-05-18T17:40:59.000Z | cloudferry/actions/prechecks/check_vmax_prerequisites.py | SVilgelm/CloudFerry | 4459c0d21ba7ccffe51176932197b352e426ba63 | [
"Apache-2.0"
] | 8 | 2017-04-07T23:42:36.000Z | 2021-08-10T11:05:10.000Z | # Copyright 2016 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import getpass
import logging
from cloudferry.lib.base import exception
from cloudferry.lib.base.action import action
from cloudferry.lib.utils import local
from cloudferry.lib.utils import remote_runner
LOG = logging.getLogger(__name__)
| 41.347826 | 79 | 0.624869 |
8679dfd086b9b3768ebf7c42ebcbb01fc263b720 | 2,569 | py | Python | bongo/core.py | codeforamerica/bongo | a1b162c54fc51630ae1cfac16e1c136b0ff320a3 | [
"BSD-3-Clause"
] | null | null | null | bongo/core.py | codeforamerica/bongo | a1b162c54fc51630ae1cfac16e1c136b0ff320a3 | [
"BSD-3-Clause"
] | null | null | null | bongo/core.py | codeforamerica/bongo | a1b162c54fc51630ae1cfac16e1c136b0ff320a3 | [
"BSD-3-Clause"
] | 1 | 2021-04-17T10:21:05.000Z | 2021-04-17T10:21:05.000Z | """
A simple wrapper for the Bongo Iowa City bus API.
"""
import requests as req
| 27.623656 | 72 | 0.534838 |
867a45f315cdcc7854ea80a22125e3ed4f3423db | 1,671 | py | Python | src/security/tcp_flooding.py | janaSunrise/useful-python-snippets | f03285b8f0b44f87326ca982129dab80a18697f5 | [
"Apache-2.0"
] | 1 | 2021-03-15T16:48:05.000Z | 2021-03-15T16:48:05.000Z | src/security/tcp_flooding.py | janaSunrise/useful-python-snippets | f03285b8f0b44f87326ca982129dab80a18697f5 | [
"Apache-2.0"
] | null | null | null | src/security/tcp_flooding.py | janaSunrise/useful-python-snippets | f03285b8f0b44f87326ca982129dab80a18697f5 | [
"Apache-2.0"
] | null | null | null | import random
import socket
import string
import sys
import threading
import time
| 26.109375 | 88 | 0.591263 |
867a66ae5d5de251f9ea678d34a2cd01a411db2e | 443 | py | Python | src/ml_final_project/utils/evaluators/default.py | yuvalot/ml_final_project | fefb67c92504ceeb7999e49daa8a8aa5a60f1c61 | [
"MIT"
] | null | null | null | src/ml_final_project/utils/evaluators/default.py | yuvalot/ml_final_project | fefb67c92504ceeb7999e49daa8a8aa5a60f1c61 | [
"MIT"
] | null | null | null | src/ml_final_project/utils/evaluators/default.py | yuvalot/ml_final_project | fefb67c92504ceeb7999e49daa8a8aa5a60f1c61 | [
"MIT"
] | null | null | null | def default_evaluator(model, X_test, y_test):
"""A simple evaluator that takes in a model,
and a test set, and returns the loss.
Args:
model: The model to evaluate.
X_test: The features matrix of the test set.
y_test: The one-hot labels matrix of the test set.
Returns:
The loss on the test set.
"""
return model.evaluate(X_test, y_test, verbose=0)[0]
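# Usage sketch (editor's addition; assumes `model` is a compiled Keras-style
# model whose evaluate() returns [loss, *metrics]):
# test_loss = default_evaluator(model, X_test, y_test)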
| 31.642857 | 61 | 0.598194 |
867addf71ccf4a2d2cc021bb3410dbc784317269 | 319 | py | Python | test.py | wangjm12138/Yolov3_wang | 3d143c7cd863dec796edede3faedacc6590cab5e | [
"MIT"
] | null | null | null | test.py | wangjm12138/Yolov3_wang | 3d143c7cd863dec796edede3faedacc6590cab5e | [
"MIT"
] | 8 | 2020-01-28T22:17:25.000Z | 2022-03-12T00:04:30.000Z | test.py | wangjm12138/Yolov3_wang | 3d143c7cd863dec796edede3faedacc6590cab5e | [
"MIT"
] | null | null | null | import random
yolo = Yolov3()  # Yolov3 is defined elsewhere in this repo; its import is elided from this excerpt
for data in yolo:
print(data)
| 16.789474 | 36 | 0.702194 |
867af3eaf92e62e8468d18b191fba31f6c76639c | 2,836 | py | Python | utils/dsp.py | huchenxucs/WaveRNN | 6d5805d54b8a3db99aa190083b550236f2c15d28 | [
"MIT"
] | null | null | null | utils/dsp.py | huchenxucs/WaveRNN | 6d5805d54b8a3db99aa190083b550236f2c15d28 | [
"MIT"
] | null | null | null | utils/dsp.py | huchenxucs/WaveRNN | 6d5805d54b8a3db99aa190083b550236f2c15d28 | [
"MIT"
] | null | null | null | import math
import numpy as np
import librosa
from utils import hparams as hp
from scipy.signal import lfilter
import soundfile as sf
'''
def build_mel_basis():
return librosa.filters.mel(hp.sample_rate, hp.n_fft, n_mels=hp.num_mels, fmin=hp.fmin)
'''
def reconstruct_waveform(mel, n_iter=32):
"""Uses Griffin-Lim phase reconstruction to convert from a normalized
mel spectrogram back into a waveform."""
denormalized = denormalize(mel)
amp_mel = db_to_amp(denormalized)
S = librosa.feature.inverse.mel_to_stft(
amp_mel, power=1, sr=hp.sample_rate,
n_fft=hp.n_fft, fmin=hp.fmin)
wav = librosa.core.griffinlim(
S, n_iter=n_iter,
hop_length=hp.hop_length, win_length=hp.win_length)
return wav
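# Usage sketch (editor's addition): `denormalize` and `db_to_amp` are defined
# elsewhere in this file (elided from this excerpt). Given a normalized mel
# spectrogram `mel` with hp.num_mels rows:
# wav = reconstruct_waveform(mel, n_iter=60)  # more iterations -> better phase estimate
# sf.write('reconstructed.wav', wav, hp.sample_rate)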
| 23.831933 | 91 | 0.645275 |
867b8e99cfbed437050f3b3f92884ccf95f2bf33 | 6,407 | py | Python | loldib/getratings/models/NA/na_talon/na_talon_jng.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_talon/na_talon_jng.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_talon/na_talon_jng.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
| 15.364508 | 46 | 0.761667 |