# ----------------------------------------------------------------------
# File: run-length-encoding/run_length_encoding.py
# Repo: geekmuse/exercism-python (MIT)
# ----------------------------------------------------------------------
def decode(to_be_decoded):
"""
Decodes a run-length encoded string.
:param to_be_decoded: run-length encoded string
:return: run-length decoded string
"""
to_be_decoded_list = list(to_be_decoded)
decoded_str_as_list = list()
num_to_print_as_list = list()
for c in to_be_decoded_list:
if c.isdigit():
num_to_print_as_list.append(c)
else:
if len(num_to_print_as_list) > 0:
num_to_print = int(''.join(num_to_print_as_list))
append = c * num_to_print
decoded_str_as_list.append(append)
num_to_print_as_list = list()
else:
decoded_str_as_list.append(c)
return ''.join(decoded_str_as_list)


def encode(to_be_encoded):
"""
Run-length encodes a string
:param to_be_encoded: string to be run-length encoded
:return: run-length encoded string
"""
last_seen = None
last_seen_count = 0
to_be_encoded_as_list = list(to_be_encoded)
encoded_str_as_list = list()
for c in to_be_encoded_as_list:
if last_seen:
if last_seen == c:
last_seen_count += 1
else:
if last_seen_count > 1:
encoded_str_as_list.append('{}{}'.format(last_seen_count, last_seen))
else:
encoded_str_as_list.append('{}'.format(last_seen))
last_seen_count = 1
else:
last_seen_count += 1
last_seen = c
if last_seen_count > 1:
encoded_str_as_list.append('{}{}'.format(last_seen_count, last_seen))
else:
if last_seen:
encoded_str_as_list.append('{}'.format(last_seen))
else:
encoded_str_as_list = list()
return ''.join(encoded_str_as_list)
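

# Quick round-trip check (an added illustration, not part of the original
# exercism solution): encode and decode should be inverses for letter input.
if __name__ == '__main__':
    sample = 'AABCCCDEEEE'
    assert encode(sample) == '2AB3CD4E'
    assert decode(encode(sample)) == sample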

# ----------------------------------------------------------------------
# File: pytorch/benchmarks/operator_benchmark/pt/conv_test.py
# Repo: raghavnauhria/whatmt (MIT)
# ----------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for Conv1d and ConvTranspose1d operators.
"""
# Configs for conv-1d ops
conv_1d_configs = op_bench.config_list(
attrs=[
[16, 33, 3, 1, 1, 64],
[16, 33, 3, 2, 16, 128],
],
attr_names=[
"in_c", "out_c", "kernel", "stride", "N", "L"
],
tags=["short"]
)
class Conv1dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_c, out_c, kernel, stride, N, L):
self.input = torch.rand(N, in_c, L)
self.conv1d = nn.Conv1d(in_c, out_c, kernel, stride=stride)
self.set_module_name("Conv1d")
def forward(self):
return self.conv1d(self.input)
class ConvTranspose1dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_c, out_c, kernel, stride, N, L):
self.input = torch.rand(N, in_c, L)
self.convtranspose1d = nn.ConvTranspose1d(in_c, out_c, kernel, stride=stride)
self.set_module_name("ConvTranspose1d")
def forward(self):
return self.convtranspose1d(self.input)
op_bench.generate_pt_test(conv_1d_configs, Conv1dBenchmark)
op_bench.generate_pt_test(conv_1d_configs, ConvTranspose1dBenchmark)
"""
Microbenchmarks for Conv2d and ConvTranspose2d operators.
"""
# Configs for Conv2d and ConvTranspose1d
conv_2d_configs = op_bench.config_list(
attrs=[
[16, 33, 3, 1, 1, 32, 32],
[16, 33, 3, 2, 16, 64, 64],
],
attr_names=[
"in_c", "out_c", "kernel", "stride", "N", "H", "W"
],
tags=["short"]
)
class Conv2dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_c, out_c, kernel, stride, N, H, W):
self.input = torch.rand(N, in_c, H, W)
self.conv2d = nn.Conv2d(in_c, out_c, kernel, stride=stride)
self.set_module_name("Conv2d")
def forward(self):
return self.conv2d(self.input)
class ConvTranspose2dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_c, out_c, kernel, stride, N, H, W):
self.input = torch.rand(N, in_c, H, W)
self.convtranspose2d = nn.ConvTranspose2d(in_c, out_c, kernel, stride=stride)
self.set_module_name("ConvTranspose2d")
def forward(self):
return self.convtranspose2d(self.input)
op_bench.generate_pt_test(conv_2d_configs, Conv2dBenchmark)
op_bench.generate_pt_test(conv_2d_configs, ConvTranspose2dBenchmark)
"""
Microbenchmarks for Conv3d and ConvTranspose3d operators.
"""
# Configs for Conv3d and ConvTranspose3d
conv_3d_configs = op_bench.config_list(
attrs=[
[16, 33, 3, 1, 8, 4, 32, 32],
[16, 33, 3, 2, 16, 8, 64, 64],
],
attr_names=[
"in_c", "out_c", "kernel", "stride", "N", "D", "H", "W"
],
tags=["short"]
)
class Conv3dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_c, out_c, kernel, stride, N, D, H, W):
self.input = torch.rand(N, in_c, D, H, W)
self.conv3d = nn.Conv3d(in_c, out_c, kernel, stride=stride)
self.set_module_name("Conv3d")
def forward(self):
return self.conv3d(self.input)
class ConvTranspose3dBenchmark(op_bench.TorchBenchmarkBase):
def init(self, in_c, out_c, kernel, stride, N, D, H, W):
self.input = torch.rand(N, in_c, D, H, W)
self.convtranspose3d = nn.ConvTranspose3d(in_c, out_c, kernel, stride=stride)
self.set_module_name("ConvTranspose3d")
def forward(self):
return self.convtranspose3d(self.input)
op_bench.generate_pt_test(conv_3d_configs, Conv3dBenchmark)
op_bench.generate_pt_test(conv_3d_configs, ConvTranspose3dBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
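
# Usage note (an assumption based on the operator_benchmark harness, not
# verified against this checkout): the "short"-tagged configs above can
# typically be selected via a tag filter, e.g.
#   python pt/conv_test.py --tag_filter short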

# ----------------------------------------------------------------------
# File: local_test/course_search/nyuapi/request.py
# Repo: NYUSHer/Widgets (MIT)
# ----------------------------------------------------------------------
import requests as R
class reqNYU():
TOKEN = ""
BASEURI = "https://sandbox.api.it.nyu.edu/"
def __init__(self, token=""):
if not token:
raise Exception("[Error] Token can not be empty!")
self.TOKEN = token
self.ping()
def ping(self):
try:
req = R.get("https://sandbox.api.it.nyu.edu/course-catalog-exp/", headers={
"Authorization": "Bearer " + self.TOKEN
}, timeout=10)
except R.exceptions.ReadTimeout:
raise Exception("[Error] NYU API not responding!")
if req.text.find("Invalid or missing token") > -1:
raise Exception("[Error] Token is not valid!")
    def rawReq(self, uri="", params={}):
        print("A request has been sent.")
        try:
            # Query parameters belong in `params=` for a GET request;
            # the original passed them as a request body via `data=`.
            req = R.get(self.BASEURI + uri, params=params, headers={
                "Authorization": "Bearer " + self.TOKEN
            }, timeout=10)
        except R.exceptions.ReadTimeout:
            raise Exception("[Error] NYU API not responding!")
        return req.json()
    def repeatReq(self, url="", params={}):
        """
        Send the request repeatedly until a valid (list) response is received.
        If invalid-token responses keep coming back, ping() re-checks the token
        every ten attempts, so the loop can still spin indefinitely; a separate
        server monitor is therefore needed.
        """
        counter = 0
        while True:
            response = self.rawReq(url, params)
            counter += 1
            if isinstance(response, list):
                break
            if counter > 10:
                self.ping()
        return response
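

# Hypothetical usage sketch (the token value and result handling are
# placeholders; the endpoint path is the one ping() exercises above):
if __name__ == '__main__':
    api = reqNYU(token='YOUR-SANDBOX-TOKEN')
    courses = api.repeatReq('course-catalog-exp/', {})
    print(len(courses))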

# ----------------------------------------------------------------------
# File: pySINGLE/setup.py
# Repo: piomonti/pySINGLE (MIT)
# ----------------------------------------------------------------------
#from distutils.core import setup
#from distutils.extension import Extension
#from Cython.Distutils import build_ext
#import numpy
#setup(
#cmdclass = {'build_ext': build_ext},
#ext_modules = [Extension("Z_shooting", ["Z_shooting.c"],)],
#include_dirs=[numpy.get_include(),'.', ],
#)
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
#extension = [Extension("Z_shooting", ["Z_shooting.c"],),]
setup(
    cmdclass={'build_ext': build_ext},
    ext_modules=[Extension("FastFused_01", ["FastFused_01.pyx"],
                           include_dirs=[numpy.get_include()])]
)
#setup(
#cmdclass = {'build_ext': build_ext},
#ext_modules = cythonize("FastFused_01.pyx"),
#include_dirs=[numpy.get_include(),'.', ],
#)
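
# Typical invocation for an in-place Cython extension build:
#   python setup.py build_ext --inplace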

# ----------------------------------------------------------------------
# File: cnnlstm/preprocessing.py
# Repo: mingjiewong/Kaggle-M5-Forecasting-Accuracy-2020 (MIT)
# ----------------------------------------------------------------------
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
from data_processing.helpers import Config
class Load:
def __init__(self,train_sales='',calendar=''):
"""
Read CSV files for daily sales and calendar input data respectively.
Args:
train_sales (str): file path for daily sales input data
calendar (str): file path for calendar input data
Attributes:
train_sales (dataframe): daily sales input data
calendar (dataframe): calendar input data
float_cols (arr): list of daily sales with dtype "float64"
int_cols (arr): list of daily sales with dtype "int32" or "int64"
"""
self.train_sales = pd.read_csv(train_sales)
self.calendar = pd.read_csv(calendar)
self.float_cols = [c for c in self.train_sales if self.train_sales[c].dtype == "float64"]
self.int_cols = [c for c in self.train_sales if self.train_sales[c].dtype in ["int64","int32"]]
def downcast_dtypes(self):
"""
Downcast daily sales input data to reduce memory usage.
Attributes:
train_sales (dataframe): downcasted daily sales input data
Returns:
dataframe: downcasted daily sales input data
"""
self.train_sales[self.float_cols] = self.train_sales[self.float_cols].astype(np.float32)
self.train_sales[self.int_cols] = self.train_sales[self.int_cols].astype(np.int16)
return self.train_sales
class Preprocess:
# Preprocess: remove id, item_id, dept_id, cat_id, store_id, state_id columns
def __init__(self,loaded_train_sales,loaded_calendar,startDay=350):
"""
Load preprocessing parameters.
Args:
loaded_train_sales (dataframe): daily sales input data
loaded_calendar (dataframe): calendar input data
startDay (int): start day
Attributes:
loaded_train_sales (dataframe): daily sales input data
calendar (dataframe): calendar input data
daysBeforeEvent1 (dataframe): input daily data of festive events
daysBeforeEvent2 (dataframe): input daily data of sporting events
snap_CA (dataframe): input daily data of SNAP program in California
snap_TX (dataframe): input daily data of SNAP program in Texas
snap_WI (dataframe): input daily data of SNAP program in Wisconsin
"""
# Remove the first 350 days in train sales data due to zero_inflated data
self.loaded_train_sales = loaded_train_sales.T[6 + startDay:]
self.calendar = loaded_calendar
# Initialize a dataframe with zeros for 1969 days in the calendar
self.daysBeforeEvent1 = pd.DataFrame(np.zeros((1969,1)))
self.daysBeforeEvent2 = pd.DataFrame(np.zeros((1969,1)))
self.snap_CA = pd.DataFrame(np.zeros((1969,1)))
self.snap_TX = pd.DataFrame(np.zeros((1969,1)))
self.snap_WI = pd.DataFrame(np.zeros((1969,1)))
def label_calendar(self):
"""
Label days with festive or sporting events, SNAP programs in California, Texas or Wisconsin.
Attributes:
daysBeforeEvent1 (dataframe): input daily data of festive events
daysBeforeEvent2 (dataframe): input daily data of sporting events
snap_CA (dataframe): input daily data of SNAP program in California
snap_TX (dataframe): input daily data of SNAP program in Texas
snap_WI (dataframe): input daily data of SNAP program in Wisconsin
Returns:
dataframe: input daily data of festive events
dataframe: input daily data of sporting events
dataframe: input daily data of SNAP program in California
dataframe: input daily data of SNAP program in Texas
dataframe: input daily data of SNAP program in Wisconsin
"""
for x,y in self.calendar.iterrows():
if((pd.isnull(self.calendar["event_name_1"][x])) == False):
self.daysBeforeEvent1[0][x-1] = 1
if((pd.isnull(self.calendar["event_name_2"][x])) == False):
self.daysBeforeEvent2[0][x-1] = 1
if((pd.isnull(self.calendar["snap_CA"][x])) == False):
self.snap_CA[0][x] = 1
if((pd.isnull(self.calendar["snap_TX"][x])) == False):
self.snap_TX[0][x] = 1
if((pd.isnull(self.calendar["snap_WI"][x])) == False):
self.snap_WI[0][x] = 1
return self.daysBeforeEvent1, self.daysBeforeEvent2, self.snap_CA, self.snap_TX, self.snap_WI
class SplitDataset:
# split dataset into evaluation (last 2 weeks), validation (first 2 weeks), training
def __init__(self, loaded_train_sales,
daysBeforeEvent1, daysBeforeEvent2,
snap_CA, snap_TX, snap_WI, startDay=350):
"""
Generate training (startDay to day 1941), evaluation (day 1941 to 1969) and validation (day 1913 to 1941) datasets.
Args:
            loaded_train_sales (dataframe): daily sales input data
daysBeforeEvent1 (dataframe): input daily data of festive events
daysBeforeEvent2 (dataframe): input daily data of sporting events
snap_CA (dataframe): input daily data of SNAP program in California
snap_TX (dataframe): input daily data of SNAP program in Texas
snap_WI (dataframe): input daily data of SNAP program in Wisconsin
startDay (int): start day
Attributes:
            loaded_train_sales (dataframe): daily sales input data
daysBeforeEvent1_train (dataframe): input daily data of festive events (training)
daysBeforeEvent2_train (dataframe): input daily data of sporting events (training)
snap_CA_train (dataframe): input daily data of SNAP program in California (training)
snap_TX_train (dataframe): input daily data of SNAP program in Texas (training)
snap_WI_train (dataframe): input daily data of SNAP program in Wisconsin (training)
daysBeforeEvent1_eval (dataframe): input daily data of festive events (evaluation)
daysBeforeEvent2_eval (dataframe): input daily data of sporting events (evaluation)
snap_CA_eval (dataframe): input daily data of SNAP program in California (evaluation)
snap_TX_eval (dataframe): input daily data of SNAP program in Texas (evaluation)
snap_WI_eval (dataframe): input daily data of SNAP program in Wisconsin (evaluation)
daysBeforeEvent1_valid (dataframe): input daily data of festive events (validation)
daysBeforeEvent2_valid (dataframe): input daily data of sporting events (validation)
snap_CA_valid (dataframe): input daily data of SNAP program in California (validation)
snap_TX_valid (dataframe): input daily data of SNAP program in Texas (validation)
snap_WI_valid (dataframe): input daily data of SNAP program in Wisconsin (validation)
"""
# Remove the first 350 days in train sales data due to zero_inflated data
self.loaded_train_sales = loaded_train_sales
# input for predicting validation period day 1941 to 1969
self.daysBeforeEvent1_eval = daysBeforeEvent1[1941:]
self.daysBeforeEvent2_eval = daysBeforeEvent2[1941:]
self.snap_CA_eval = snap_CA[1941:]
self.snap_TX_eval = snap_TX[1941:]
self.snap_WI_eval = snap_WI[1941:]
# input for predicting validation period day 1913 to 1941
self.daysBeforeEvent1_valid = daysBeforeEvent1[1913:1941]
self.daysBeforeEvent2_valid = daysBeforeEvent2[1913:1941]
self.snap_CA_valid = snap_CA[1913:1941]
self.snap_TX_valid = snap_TX[1913:1941]
self.snap_WI_valid = snap_WI[1913:1941]
# input for training as a feature
self.daysBeforeEvent1_train = daysBeforeEvent1[startDay:1941]
self.daysBeforeEvent2_train = daysBeforeEvent2[startDay:1941]
self.snap_CA_train = snap_CA[startDay:1941]
self.snap_TX_train = snap_TX[startDay:1941]
self.snap_WI_train = snap_WI[startDay:1941]
def concatenate(self):
"""
Generate a daily sales input data with the presence of events and SNAP program at day level.
Attributes:
concat_train_sales (dataframe): input daily data of sales, presence of events and SNAP program
Returns:
dataframe: input daily data of sales, presence of events and SNAP program
"""
        #Before concatenation with our main data "dt", indexes are aligned and column names are changed to "oneDayBeforeEvent"
self.daysBeforeEvent1_train.columns = ["oneDayBeforeEvent1"]
self.daysBeforeEvent1_train.index = self.loaded_train_sales.index
self.daysBeforeEvent2_train.columns = ["oneDayBeforeEvent2"]
self.daysBeforeEvent2_train.index = self.loaded_train_sales.index
self.snap_CA_train.columns = ["snap_CA"]
self.snap_CA_train.index = self.loaded_train_sales.index
self.snap_TX_train.columns = ["snap_TX"]
self.snap_TX_train.index = self.loaded_train_sales.index
self.snap_WI_train.columns = ["snap_WI"]
self.snap_WI_train.index = self.loaded_train_sales.index
self.concat_train_sales = pd.concat([self.loaded_train_sales, self.daysBeforeEvent1_train,
self.daysBeforeEvent2_train, self.snap_CA_train,
self.snap_TX_train, self.snap_WI_train], axis = 1, sort=False)
return self.concat_train_sales
class ScalingTrainSales:
def __init__(self,concat_train_sales,feature_range=(0,1),startDay=350, config_path=''):
"""
Load parameters for scaling features in input data.
Args:
concat_train_sales (dataframe): input daily data of sales, presence of events and SNAP program
feature_range ((int, int)): the scaling range
startDay (int): start day
config_path (str): file path for config.yaml
Attributes:
concat_train_sales (dataframe): input daily data of sales, presence of events and SNAP program
feature_range ((int, int)): the scaling range
X_train (arr): training inputs
y_train (arr): test inputs
startDay (int): start day
config (dict): parameter configurations from config.yaml
timesteps (int): number of timesteps
"""
self.concat_train_sales = concat_train_sales
self.feature_range = feature_range
self.X_train = []
self.y_train = []
self.startDay = startDay
self.config = Config(config_path)
self.timesteps = self.config.timesteps
def gen_train_data(self):
"""
Generate training dataset using Min-Max scaler.
Attributes:
X_train (arr): training inputs with dimensions
[n_timeseries, n_timesteps, n_features]
y_train (arr): test inputs with dimensions
[n_timeseries, n_pred_products]
Returns:
arr: training inputs with dimensions
[n_timeseries, n_timesteps, n_features]
arr: test inputs with dimensions
[n_timeseries, n_pred_products]
obj: scaler
"""
sc = MinMaxScaler(feature_range=self.feature_range)
train_sales_scaled = sc.fit_transform(self.concat_train_sales)
for i in range(self.timesteps, 1941 - self.startDay):
self.X_train.append(train_sales_scaled[i-self.timesteps:i])
self.y_train.append(train_sales_scaled[i][0:30490])
#Convert to np array to be able to feed the LSTM model
self.X_train = np.array(self.X_train)
self.y_train = np.array(self.y_train)
return self.X_train, self.y_train, sc
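

# Hypothetical end-to-end wiring of the classes above (file and config paths
# are placeholders for the M5 competition inputs):
if __name__ == '__main__':
    loader = Load(train_sales='sales_train_validation.csv', calendar='calendar.csv')
    sales = loader.downcast_dtypes()
    pre = Preprocess(sales, loader.calendar)
    e1, e2, ca, tx, wi = pre.label_calendar()
    split = SplitDataset(pre.loaded_train_sales, e1, e2, ca, tx, wi)
    concat = split.concatenate()
    X_train, y_train, scaler = ScalingTrainSales(concat, config_path='config.yaml').gen_train_data()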

# ----------------------------------------------------------------------
# File: Physics250-ME3738/timeIntervalBlinks.py
# Repo: illusion173/Physics250 (MIT)
# ----------------------------------------------------------------------
import math
# Speed of light in m/s
speedofLight = 2.9979 * pow(10, 8)


def timeIntervalBlinks():
    """Apply relativistic time dilation: dt = gamma * dt0."""
    time = float(input('Input Time (sec): '))
    speed = float(input('Speed (in units of 1e8 m/s): '))
    speed = speed * pow(10, 8)
    gamma = math.sqrt(1 / (1 - pow(speed / speedofLight, 2)))
    answer = gamma * time
    print(answer)


timeIntervalBlinks()
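
# Worked example: for a speed input of 2.0 (i.e. 2.0e8 m/s), v/c ~= 0.6671,
# so gamma = 1/sqrt(1 - 0.6671**2) ~= 1.342; a 1 s interval dilates to ~1.342 s.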

# ----------------------------------------------------------------------
# File: daemons/area-deletion-daemon/app.py
# Repo: sampierson/upload-service (MIT)
# ----------------------------------------------------------------------
import json
from upload.common.upload_area import UploadArea
# This lambda function is invoked by messages in the area_deletion_queue (AWS SQS).
# The queue and the lambda function are connected via aws_lambda_event_source_mapping
def delete_upload_area(event, context):
unwrapped_event = json.loads(event["Records"][0]["body"])
area_uuid = unwrapped_event["area_uuid"]
UploadArea(area_uuid).delete()
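

# Local smoke test (illustration only; the event mirrors the SQS record shape
# unwrapped above, and the area_uuid value is a placeholder):
if __name__ == '__main__':
    fake_event = {'Records': [{'body': json.dumps({'area_uuid': 'example-uuid'})}]}
    delete_upload_area(fake_event, context=None)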

# ----------------------------------------------------------------------
# File: engine/geometry/tests/test_overlap_detection_2d.py
# Repo: codehearts/pickles-fetch-quest (MIT)
# ----------------------------------------------------------------------
from ..overlap_detection_2d import detect_overlap_2d
from unittest.mock import call, Mock, patch
import unittest
class TestOverlap2d(unittest.TestCase):
"""Test two dimensional overlap detection functions."""
def setUp(self):
"""Creates a ``self.first`` and ``self.second`` Mock object."""
self.first = Mock(x=1, y=2, width=3, height=4)
self.second = Mock(x=5, y=6, width=7, height=8)
@patch('engine.geometry.overlap_detection_2d.detect_overlap_1d')
def test_2d_rectangles_no_overlap(self, mock_detect_1d):
"""False when no overlap exists on x and y axes."""
mock_detect_1d.side_effect = [False, False] # Return value sequence
self.assertFalse(detect_overlap_2d(self.first, self.second),
"False positive with no axes overlapping")
mock_detect_1d.assert_has_calls([
call(self.first.x, self.first.width,
self.second.x, self.second.width),
call(self.first.y, self.first.height,
self.second.y, self.second.height),
])
@patch('engine.geometry.overlap_detection_2d.detect_overlap_1d')
def test_2d_rectangles_x_axis_overlap(self, mock_detect_1d):
"""False when overlap only exists on x axis."""
mock_detect_1d.side_effect = [True, False] # Return value sequence
self.assertFalse(detect_overlap_2d(self.first, self.second),
"False positive with only one axis overlapping")
mock_detect_1d.assert_has_calls([
call(self.first.x, self.first.width,
self.second.x, self.second.width),
call(self.first.y, self.first.height,
self.second.y, self.second.height),
])
@patch('engine.geometry.overlap_detection_2d.detect_overlap_1d')
def test_2d_rectangles_y_axis_overlap(self, mock_detect_1d):
"""False when overlap only exists on y axis."""
mock_detect_1d.side_effect = [False, True] # Return value sequence
self.assertFalse(detect_overlap_2d(self.first, self.second),
"False positive with only one axis overlapping")
mock_detect_1d.assert_has_calls([
call(self.first.x, self.first.width,
self.second.x, self.second.width),
call(self.first.y, self.first.height,
self.second.y, self.second.height),
])
@patch('engine.geometry.overlap_detection_2d.detect_overlap_1d')
def test_2d_rectangles_both_axes_overlap(self, mock_detect_1d):
"""True when overlap exists on both axes."""
mock_detect_1d.side_effect = [True, True] # Return value sequence
        self.assertTrue(detect_overlap_2d(self.first, self.second),
                        "False negative with both axes overlapping")
mock_detect_1d.assert_has_calls([
call(self.first.x, self.first.width,
self.second.x, self.second.width),
call(self.first.y, self.first.height,
self.second.y, self.second.height),
])
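

# These tests run under standard unittest discovery, e.g. from the project
# root (module path inferred from the file location):
#   python -m unittest engine.geometry.tests.test_overlap_detection_2d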
| 42.315068 | 76 | 0.642279 | 2,973 | 0.962447 | 0 | 0 | 2,645 | 0.856264 | 0 | 0 | 806 | 0.260926 |
eb10c1e56faa83018c15d8d04331071eb6bc524c | 786 | py | Python | PythonTest/Aula18A.py | MatthewsTomts/Python_Class | f326d521d62c45a4fcb429d2a22cf2ab958492cb | [
"MIT"
]
| null | null | null | PythonTest/Aula18A.py | MatthewsTomts/Python_Class | f326d521d62c45a4fcb429d2a22cf2ab958492cb | [
"MIT"
]
| null | null | null | PythonTest/Aula18A.py | MatthewsTomts/Python_Class | f326d521d62c45a4fcb429d2a22cf2ab958492cb | [
"MIT"
]
| null | null | null | teste = list()
teste.append('Matheus')
teste.append(17)
galera = [teste[:]] # Stores a copy of teste inside galera
teste[0] = 'Oliver'
teste[1] = 22
galera.append(teste) # Appends a reference, linking teste and galera
print(galera)
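# Because galera[0] holds a copy while galera[1] references teste, a later
# mutation such as `teste[1] = 30` would show up in galera[1] only:
# galera would become [['Matheus', 17], ['Oliver', 30]]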
pessoas = [['Harvey', 23], ['Madeleine', 19], ['Roger', 250], ['Mark', 20]]
print(pessoas[0][0]) # Prints the first value of the first list inside this list
for p in pessoas:
    print(f'{p[0]} is {p[1]} years old.')
dados = []
pes = []
for i in range(0, 3):
print('-='*10)
    dados.append(input('Name: '))
    dados.append(int(input('Age: ')))
    pes.append(dados[:])
    dados.clear() # Clears the values stored in dados
for p in pes:
    print(f'{p[0]} is of legal age.' if p[1] > 20 else f'{p[0]} is a minor.')
# Exercises 84-89

# ----------------------------------------------------------------------
# File: src/bio2bel/exc.py
# Repo: aman527/bio2bel (MIT)
# ----------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""Bio2BEL custom errors."""
class Bio2BELMissingNameError(TypeError):
"""Raised when an abstract manager is subclassed and instantiated without overriding the module name."""
class Bio2BELModuleCaseError(TypeError):
"""Raised when the module name in a subclassed and instantiated manager is not all lowercase."""
class Bio2BELMissingModelsError(TypeError):
    """Raised when trying to build a flask admin app with no models defined."""
class Bio2BELTestMissingManagerError(TypeError):
    """Raised when "Manager" was not set as a class-level variable."""
class Bio2BELManagerTypeError(TypeError):
"""Raised when the class-level variable "Manager" is not a subclass of :class:`bio2bel.AbstractManager`."""

# ----------------------------------------------------------------------
# File: tencentcloud/cat/v20180409/errorcodes.py
# Repo: HS-Gray/tencentcloud-sdk-python (Apache-2.0)
# ----------------------------------------------------------------------
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'
# Database query error.
FAILEDOPERATION_DBQUERYFAILED = 'FailedOperation.DbQueryFailed'
# Failed to create database record.
FAILEDOPERATION_DBRECORDCREATEFAILED = 'FailedOperation.DbRecordCreateFailed'
# Failed to update database record.
FAILEDOPERATION_DBRECORDUPDATEFAILED = 'FailedOperation.DbRecordUpdateFailed'
# ES query error.
FAILEDOPERATION_ESQUERYERROR = 'FailedOperation.ESQueryError'
# No valid nodes.
FAILEDOPERATION_NOVALIDNODES = 'FailedOperation.NoValidNodes'
# The account is in arrears.
FAILEDOPERATION_ORDEROUTOFCREDIT = 'FailedOperation.OrderOutOfCredit'
# The resource does not exist.
FAILEDOPERATION_RESOURCENOTFOUND = 'FailedOperation.ResourceNotFound'
# The task is not running.
FAILEDOPERATION_TASKNOTRUNNING = 'FailedOperation.TaskNotRunning'
# The task is not suspended.
FAILEDOPERATION_TASKNOTSUSPENDED = 'FailedOperation.TaskNotSuspended'
# The task status does not allow the current operation.
FAILEDOPERATION_TASKOPERATIONNOTALLOW = 'FailedOperation.TaskOperationNotAllow'
# The batch probe tasks are not of the same type.
FAILEDOPERATION_TASKTYPENOTSAME = 'FailedOperation.TaskTypeNotSame'
# Trial task limit exceeded.
FAILEDOPERATION_TRIALTASKEXCEED = 'FailedOperation.TrialTaskExceed'
# Internal error.
INTERNALERROR = 'InternalError'
# Parameter error.
INVALIDPARAMETER = 'InvalidParameter'
# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'
# Missing parameter error.
MISSINGPARAMETER = 'MissingParameter'
# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'
# Unknown parameter error.
UNKNOWNPARAMETER = 'UnknownParameter'
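
# Client-side sketch (the exception type and accessor are assumptions; check
# the SDK's common exception module before relying on them):
#   try:
#       client.SomeAction(request)
#   except TencentCloudSDKException as err:
#       if err.get_code() == RESOURCENOTFOUND:
#           ...  # handle the missing resource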

# ----------------------------------------------------------------------
# File: Website/Ogre/points/tests.py
# Repo: pringyy/OGRE (MIT)
# ----------------------------------------------------------------------
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from .models import StudentProfileInfo, User
from .forms import UserForm, ContactForm, UserProfileInfoForm
class IndexPageTest(TestCase):
# Page can only be used when user is logged in, so create user and log them in
def setUp(self):
self.user = User.objects.create_user('test', '[email protected]', 'testpassword')
self.client.login(username='test', password='testpassword')
self.response = self.client.get(reverse('index'))
    # tests that index page is reachable via its url name
def test_index_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that index page uses template
def test_index_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/index.html')
# tests that the index page displays a users points to them
def test_index_page_displays_points(self):
self.assertContains(self.response, 'Points')
# tests that user can't see index page when logged out
def test_index_page_restricted_when_logged_out(self):
# Log user out and obtain response again
self.client.logout()
self.response = self.client.get(reverse('index'))
self.assertEquals(self.response.status_code, 302)
class LoginPageTest(TestCase):
def setUp(self):
self.response = self.client.get(reverse('login'))
# tests that login page uses login url
def test_login_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that login page uses template
def test_login_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/login.html')
def test_login_page_has_required_fields(self):
self.assertContains(self.response, 'StudentID')
self.assertContains(self.response, 'Password')
self.assertContains(self.response, 'Username')
# tests that login page contains sign in button
def test_login_page_has_sign_in_button(self):
self.assertContains(self.response, "Sign in")
# tests that login page contains register button
def test_login_page_has_register_button(self):
self.assertContains(self.response, "Register here")
class FAQPageTest(TestCase):
def setUp(self):
self.response = self.client.get(reverse('faq'))
# tests that faq page uses faq url
def test_faq_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that faq page uses template
def test_faq_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/faq.html')
class ProfilePageTest(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', '[email protected]', 'testpassword')
self.client.login(username='test', password='testpassword')
self.response = self.client.get(reverse('profile'))
# tests that profile page uses profile url
def test_profile_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that profile page uses template
def test_profile_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/profile.html')
# tests that profile page displays points
def test_profile_page_displays_points(self):
self.assertContains(self.response, "Total Points")
class ThanksPageTest(TestCase):
def setUp(self):
self.response = self.client.get(reverse('thanks'))
# tests that thanks page uses thanks url
def test_thanks_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that thanks page uses template
def test_thanks_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/thanks.html')
# tests that thanks page displays thank you message
def test_thanks_pages_provides_message(self):
self.assertContains(self.response, 'Thanks for submitting the contact form!')
class ContactPageTest(TestCase):
def setUp(self):
self.response = self.client.get(reverse('contact'))
# tests that contact page uses contact url
def test_contact_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that contact page uses template
def test_contact_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/contact.html')
class RegisterPageTest(TestCase):
def setUp(self):
self.response = self.client.get(reverse('register'))
# tests that register page uses register url
def test_register_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that register page uses template
def test_register_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/register.html')
def test_register_page_has_required_fields(self):
self.assertContains(self.response, 'StudentID')
self.assertContains(self.response, 'Password')
self.assertContains(self.response, 'Username')
# tests that login page contains register button
def test_register_page_has_register_button(self):
self.assertContains(self.response, "Register")
class AboutPageTest(TestCase):
def setUp(self):
self.response = self.client.get(reverse('about'))
# tests that about page uses about url
def test_about_view_url_by_name(self):
self.assertEquals(self.response.status_code, 200)
# tests that about page uses template
def test_about_page_uses_templates(self):
self.assertTemplateUsed(self.response, 'points/about.html')
def test_about_page_has_title(self):
self.assertIn(b'<title>', self.response.content)
self.assertIn(b'</title>', self.response.content)
# tests that the about page correctly displays the name of the staff
def test_about_page_contains_staff(self):
self.assertIn(b'Alastair Innes', self.response.content)
self.assertIn(b'Robert Pringle', self.response.content)
self.assertIn(b'Catriona Murphy', self.response.content)
self.assertIn(b'Mingfeng Ye', self.response.content)
self.assertIn(b'Harry Yau', self.response.content)
class StudentProfileTests(TestCase):
def test_student_profile(self):
# create User object
User = get_user_model()
user = User.objects.create_user('allyinnes99', '[email protected]', 'bad_password')
# create Student object
student = StudentProfileInfo()
student.user = user
student.StudentID = '2317070i'
student.spentPoints = 50
student.totalPoints = 150
student.currentPoints = student.totalPoints - student.spentPoints
student.save()
record = StudentProfileInfo.objects.get(pk=1)
self.assertEqual(record, student)
class UserFormTests(TestCase):
# tests if the user form is valid when given valid data
def test_user_form_valid(self):
form = UserForm(
data={'username': "user123", 'password': "password123"})
self.assertTrue(form.is_valid())
# tests if the user form is invalid when given invalid data
def test_user_form_invalid(self):
form = UserForm(data={'studentID': "2317070i", 'password': "password123"})
self.assertFalse(form.is_valid())
class UserProfileInfoFormTests(TestCase):
    # tests if the user profile form is valid when given valid data
def test_user_profile_info_form(self):
form = UserProfileInfoForm(data={"StudentID": "2317070i", "profile_pic": "test.jpg"})
self.assertTrue(form.is_valid())
# tests if the user profile form is valid with no profile picture provided
def test_user_profile_info_form_with_no_profile_pic(self):
form = UserProfileInfoForm(data={"StudentID": "2317070i"})
self.assertTrue(form.is_valid())
# tests if the user profile form is invalid when given invalid data
def test_user_profile_form_invalid(self):
form = UserForm(data={'password': "password123"})
self.assertFalse(form.is_valid())
class ContactFormTests(TestCase):
# tests if the contact form is valid when given valid data
def test_contact_form_valid(self):
form = ContactForm(
data={'contact_name': 'Test', 'contact_email': "[email protected]", "subject": "test", "content": "This is a test!"})
self.assertTrue(form.is_valid())
# tests if the contact form is invalid when given invalid data
def test_contact_form_invalid(self):
form = ContactForm(data={'contact_email': "[email protected]", "subject": "test", "content": "This is a test!"})
        self.assertFalse(form.is_valid())
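

# Run with Django's test runner from the project root (app label taken from
# the file path):
#   python manage.py test points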

# ----------------------------------------------------------------------
# File: py2.5/processing/reduction.py
# Repo: geofft/multiprocess (BSD-3-Clause)
# ----------------------------------------------------------------------
#
# Module to support the pickling of different types of connection
# objects and file objects so that they can be transferred between
# different processes.
#
# processing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#
__all__ = []
import os
import sys
import socket
import threading
import copy_reg
import processing
from processing import _processing
from processing.logger import debug, subDebug, subWarning
from processing.forking import thisThreadIsSpawning
from processing.process import _registerAfterFork
#
#
#
connections_are_picklable = (
sys.platform == 'win32' or hasattr(_processing, 'recvFd')
)
try:
fromfd = socket.fromfd
except AttributeError:
def fromfd(fd, family, type, proto=0):
s = socket._socket.socket()
_processing.changeFd(s, fd, family, type, proto)
return s
#
# Platform specific definitions
#
if sys.platform == 'win32':
import _subprocess
from processing._processing import win32
closeHandle = win32.CloseHandle
def duplicateHandle(handle):
return _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle,
_subprocess.GetCurrentProcess(),
0, False, _subprocess.DUPLICATE_SAME_ACCESS
).Detach()
def sendHandle(conn, handle, destination_pid):
process_handle = win32.OpenProcess(
win32.PROCESS_ALL_ACCESS, False, destination_pid
)
try:
new_handle = _subprocess.DuplicateHandle(
_subprocess.GetCurrentProcess(), handle,
process_handle, 0, False, _subprocess.DUPLICATE_SAME_ACCESS
)
conn.send(new_handle.Detach())
finally:
win32.CloseHandle(process_handle)
def recvHandle(conn):
return conn.recv()
def isInheritableHandle(handle):
return (win32.GetHandleInformation(handle) & win32.HANDLE_FLAG_INHERIT)
else:
closeHandle = os.close
duplicateHandle = os.dup
def sendHandle(conn, handle, destination_pid):
_processing.sendFd(conn.fileno(), handle)
def recvHandle(conn):
return _processing.recvFd(conn.fileno())
def isInheritableHandle(handle):
return True
#
# Support for a per-process server thread which caches pickled handles
#
_cache = set()
def _reset(obj):
global _lock, _listener, _cache
for h in _cache:
closeHandle(h)
_cache.clear()
_lock = threading.Lock()
_listener = None
_reset(None)
_registerAfterFork(_reset, _reset)
def _getListener():
global _listener
if _listener is None:
_lock.acquire()
try:
if _listener is None:
from processing.connection import Listener
debug('starting listener and thread for sending handles')
_listener = Listener(authenticate=True)
t = threading.Thread(target=_serve)
t.setDaemon(True)
t.start()
finally:
_lock.release()
return _listener
def _serve():
while 1:
try:
conn = _listener.accept()
handle_wanted, destination_pid = conn.recv()
_cache.remove(handle_wanted)
sendHandle(conn, handle_wanted, destination_pid)
closeHandle(handle_wanted)
conn.close()
except (SystemExit, KeyboardInterrupt):
raise
except:
if not processing.currentProcess()._exiting:
import traceback
subWarning(
'thread for sharing handles raised exception :\n' +
'-'*79 + '\n' + traceback.format_exc() + '-'*79
)
#
# Functions to be used for pickling/unpickling objects with handles
#
def reduceHandle(handle):
if thisThreadIsSpawning() and isInheritableHandle(handle):
return (None, handle, True)
dup_handle = duplicateHandle(handle)
_cache.add(dup_handle)
subDebug('reducing handle %d', handle)
return (_getListener().address, dup_handle, False)
def rebuildHandle(pickled_data):
from processing.connection import Client
address, handle, inherited = pickled_data
if inherited:
return handle
subDebug('rebuilding handle %d', handle)
conn = Client(address, authenticate=True)
conn.send((handle, os.getpid()))
new_handle = recvHandle(conn)
conn.close()
return new_handle
#
# Register `_processing.Connection` with `copy_reg`
#
def reduceConnection(conn):
return rebuildConnection, (reduceHandle(conn.fileno()),)
def rebuildConnection(reduced_handle):
fd = rebuildHandle(reduced_handle)
return _processing.Connection(fd, duplicate=False)
copy_reg.pickle(_processing.Connection, reduceConnection)
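
# Illustration (assumes the package's normal setup): with the copy_reg
# registration above, a plain pickle round-trip moves the underlying handle
# through the per-process listener, e.g.
#   payload = pickle.dumps(conn)   # reduceConnection duplicates the handle
#   conn2 = pickle.loads(payload)  # rebuildConnection fetches it via Client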
#
# Register `socket.socket` with `copy_reg`
#
def reduceSocket(s):
try:
Family, Type, Proto = s.family, s.type, s.proto
except AttributeError:
# have to guess family, type, proto
address = s.getsockname()
Family = type(address) is str and socket.AF_UNIX or socket.AF_INET
Type = s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
Proto = 0
reduced_handle = reduceHandle(s.fileno())
return rebuildSocket, (reduced_handle, Family, Type, Proto)
def rebuildSocket(reduced_handle, family, type, proto):
fd = rebuildHandle(reduced_handle)
_sock = fromfd(fd, family, type, proto)
closeHandle(fd)
return socket.socket(_sock=_sock)
copy_reg.pickle(socket.socket, reduceSocket)
#
# Register `_processing.PipeConnection` with `copy_reg`
#
if sys.platform == 'win32':
def reducePipeConnection(conn):
return rebuildPipeConnection, (reduceHandle(conn.fileno()),)
def rebuildPipeConnection(reduced_handle):
handle = rebuildHandle(reduced_handle)
return _processing.PipeConnection(handle, duplicate=False)
copy_reg.pickle(_processing.PipeConnection, reducePipeConnection)

# ----------------------------------------------------------------------
# File: data-structures/print-the-elements-of-a-linked-list-in-reverse.py
# Repo: gajubadge11/HackerRank-1 (MIT)
# ----------------------------------------------------------------------
"""
Print the elements of a linked list in reverse order to standard output.
head may be None for an empty list.
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def ReversePrint(head):
if head is None:
return
else:
out = []
node = head
while node != None:
out.append(node.data)
node = node.next
print("\n".join(map(str, out[::-1])))
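

# Minimal illustration with the Node class from the docstring above
# (hypothetical wiring, not part of the HackerRank harness):
#   head = Node(1, Node(2, Node(3)))
#   ReversePrint(head)   # prints 3, 2, 1 on separate lines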

# ----------------------------------------------------------------------
# File: cfd/cfd_rel_perms.py
# Repo: lanetszb/vofpnm (MIT)
# ----------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2020 Aleksandr Zhuravlyov and Zakhar Lanets
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import json
import pandas as pd
import copy
import matplotlib.pyplot as plt
import time as tm
from matplotlib import rc
current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, '../../'))
from netgrid import save_files_collection_to_file
from matplotlib.ticker import FormatStrFormatter
from vofpnm.cfd.ini_class import Ini
from vofpnm.cfd.cfd_class import Cfd
from vofpnm.helpers import plot_rel_perms, plot_conesrvation_check, plot_viscs_vels, plot_av_sat, \
plot_capillary_pressure_curve, plot_capillary_pressures
# rc('text', usetex=True)
# plt.rcParams["font.family"] = "Times New Roman"
start_time = tm.time()
ini = Ini(config_file=sys.argv[1])
cfd = Cfd(ini)
visc_0 = ini.paramsPnm['visc_0']
visc_1 = ini.paramsPnm['visc_1']
ini.throats_viscs = np.tile(visc_0, ini.netgrid.throats_N)
cfd.run_pnm()
throats_volumes = cfd.ini.throats_volumes
# ### validation with openFoam ###
test_case_vofpnm = dict()
times_alpha_avs = dict()
times_u_mgn_avs = dict()
times_F_avs = dict()
times_F_avs_new = dict()
times_V_in = dict()
thrs_velocities_to_output = dict()
thrs_alphas_to_output = dict()
nus = {'1': visc_0, '2': visc_1}
rhos = {'1': ini.paramsPnm['b_dens_fluid1'], '2': ini.paramsPnm['b_dens_fluid1']}
test_case_vofpnm['mus'] = nus
test_case_vofpnm['rhos'] = rhos
test_case_vofpnm['sigma'] = ini.ift
# ### validation with openfoam one-phase ###
throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
u_mgn_av = np.sum((throats_volumes * throats_vels)) / np.sum(throats_volumes)
test_case_vofpnm['ref_u_mgn'] = u_mgn_av
print('ref_u_mgn', u_mgn_av)
throats_widths = np.absolute(np.array(list(cfd.ini.throats_widths.values())))
av_width = np.sum((throats_volumes * throats_widths)) / np.sum(throats_volumes)
test_case_vofpnm['width'] = av_width
ini.flow_0_ref = cfd.calc_rel_flow_rate()
print('flow_0_ref', ini.flow_0_ref)
visc_1 = ini.paramsPnm['visc_1']
ini.throats_viscs = np.tile(visc_1, ini.netgrid.throats_N)
cfd.run_pnm()
ini.flow_1_ref = cfd.calc_rel_flow_rate()
cfd.calc_coupling_params()
cfd.run_pnm()
rel_perms_0 = []
rel_perms_1 = []
capillary_numbers = []
capillary_pressures = []
av_sats = []
throats_volumes = cfd.ini.throats_volumes
throats_av_sats = cfd.ini.equation.throats_av_sats
dens_0 = cfd.ini.paramsPnm['dens_0']
mass_already_in = copy.deepcopy(np.sum(throats_volumes * throats_av_sats * dens_0))
mass_rates_in = []
mass_rates_out = []
masses_inside = []
times = []
viscs = []
vol_rates_in = []
vol_rates_out = []
#################
# Paraview output
#################
os.system('rm -r inOut/*.vtu')
os.system('rm -r inOut/*.pvd')
sats_dict = dict()
file_name = 'inOut/collection.pvd'
files_names = list()
files_descriptions = list()
cells_arrays = cfd.process_paraview_data()
cfd.ini.netgrid.cells_arrays = cells_arrays
files_names.append(str(0) + '.vtu')
files_descriptions.append(str(0))
cfd.ini.netgrid.save_cells('inOut/' + files_names[-1])
save_files_collection_to_file(file_name, files_names, files_descriptions)
#################
time = [0]
time_steps = []
cour_number = np.empty([])
time_curr = 0
time_step_curr = 0
time_output_freq = cfd.ini.time_period / 500.
round_output_time = int(ini.round_output_time)
output_time_step = ini.output_time_step
time_bound = output_time_step
is_output_step = False
is_last_step = False
out_idx = int(0)
while True:
if cfd.ini.time_step_type == 'const':
cfd.ini.time_step = cfd.ini.const_time_step
elif cfd.ini.time_step_type == 'flow_variable':
cfd.ini.time_step = cfd.ini.local.calc_flow_variable_time_step(
cfd.ini.throats_velocities)
elif cfd.ini.time_step_type == 'div_variable':
cfd.ini.time_step = cfd.ini.local.calc_div_variable_time_step(
cfd.ini.equation.sats[cfd.ini.equation.i_curr], cfd.ini.throats_velocities)
time_step_curr = cfd.ini.time_step
if time_curr + time_step_curr >= time_bound:
time_step_curr = time_bound - time_curr
time_bound += output_time_step
is_output_step = True
if time_curr + time_step_curr >= cfd.ini.time_period:
is_last_step = True
if not is_output_step:
time_step_curr = cfd.ini.time_period - time_curr
time_steps.append(time_step_curr)
time_curr += time_step_curr
cfd.ini.equation.cfd_procedure_one_step(cfd.ini.throats_velocities, time_step_curr)
cfd.calc_coupling_params()
mass_inside = copy.deepcopy(np.sum(throats_volumes * throats_av_sats * dens_0))
masses_inside.append(mass_inside)
vol_rate_in, vol_rate_out, vol_rate_in_0, vol_rate_out_1 = cfd.calc_flow_rates(mass_rates_in,
mass_rates_out)
vol_rates_out.append(vol_rate_out_1)
cfd.calc_rel_perms(rel_perms_0, rel_perms_1, capillary_numbers, capillary_pressures,
av_sats, ini.flow_0_ref, ini.flow_1_ref, vol_rate_in_0)
print('time_step: ', round(time_step_curr, round_output_time))
time.append(time_curr)
cfd.ini.equation.print_cour_numbers(cfd.ini.throats_velocities, cfd.ini.time_step)
print(' percentage executed:', round((time_curr / cfd.ini.time_period * 100.), 2), '%.', '\n')
cfd.run_pnm()
cells_arrays = cfd.process_paraview_data()
if is_output_step:
cfd.ini.netgrid.cells_arrays = cells_arrays
files_names.append(str(round(time_curr, round_output_time)) + '.vtu')
files_descriptions.append(str(round(time_curr, round_output_time)))
cfd.ini.netgrid.save_cells('inOut/' + files_names[-1])
save_files_collection_to_file(file_name, files_names, files_descriptions)
out_idx += 1
is_output_step = False
####### validation with openfoam #######
throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
u_mgn_av = np.sum(throats_volumes * throats_vels) / np.sum(throats_volumes)
alpha_av = np.sum(throats_volumes * throats_av_sats) / np.sum(throats_volumes)
F_av = np.sum(throats_volumes * throats_vels * throats_av_sats) / np.sum(
throats_volumes * throats_vels)
times_u_mgn_avs[str(round(time_curr, round_output_time))] = u_mgn_av
times_alpha_avs[str(round(time_curr, round_output_time))] = alpha_av
times_F_avs[str(round(time_curr, round_output_time))] = F_av
times_F_avs_new[str(round(time_curr, round_output_time))] = (
vol_rate_out - vol_rate_out_1) / vol_rate_out
times_V_in[str(round(time_curr, round_output_time))] = vol_rate_in
####### validation with openfoam #######
print(str(round(time_curr, round_output_time)), time_curr)
throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
throats_viscs = cfd.ini.throats_viscs
visc = np.sum(cfd.ini.throats_volumes * throats_viscs) / np.sum(cfd.ini.throats_volumes)
times.append(time_curr)
viscs.append(visc)
vol_rates_in.append(vol_rate_in)
if is_last_step:
break
execution_time = tm.time() - start_time
print("--- %s seconds ---" % execution_time)
#############
# Rel perms validation output
#############
test_case_vofpnm['times_alpha_avs'] = times_alpha_avs
test_case_vofpnm['times_u_mgn_avs'] = times_u_mgn_avs
test_case_vofpnm['times_F_avs'] = times_F_avs
test_case_vofpnm['times_F_avs_new'] = times_F_avs_new
test_case_vofpnm['execution_time'] = execution_time
test_case_vofpnm['time_step'] = cfd.ini.output_time_step
test_case_vofpnm['grid_volume'] = cfd.ini.grid_volume
test_case_vofpnm['total_volume'] = np.sum(throats_volumes)
test_case_vofpnm['times_V_in'] = times_V_in
json_file_u_mgns = 'inOut/validation/tmp.json'
with open(json_file_u_mgns, 'w') as f:
json.dump(test_case_vofpnm, f, sort_keys=False, indent=4 * ' ', ensure_ascii=False)
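
# Invocation note: the script takes the configuration path as its first
# command-line argument (see `Ini(config_file=sys.argv[1])` above), e.g.
#   python cfd/cfd_rel_perms.py path/to/config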

# ----------------------------------------------------------------------
# File: python/gameduino/base.py
# Repo: Godzil/gameduino (BSD-3-Clause)
# ----------------------------------------------------------------------
import struct
ascii_glyphs = [
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18, 0x00,
0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x7f, 0x36, 0x7f, 0x36, 0x36, 0x00,
0x0c, 0x3f, 0x68, 0x3e, 0x0b, 0x7e, 0x18, 0x00, 0x60, 0x66, 0x0c, 0x18, 0x30, 0x66, 0x06, 0x00,
0x38, 0x6c, 0x6c, 0x38, 0x6d, 0x66, 0x3b, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0c, 0x18, 0x30, 0x30, 0x30, 0x18, 0x0c, 0x00, 0x30, 0x18, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00,
0x00, 0x18, 0x7e, 0x3c, 0x7e, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00,
0x3c, 0x66, 0x6e, 0x7e, 0x76, 0x66, 0x3c, 0x00, 0x18, 0x38, 0x18, 0x18, 0x18, 0x18, 0x7e, 0x00,
0x3c, 0x66, 0x06, 0x0c, 0x18, 0x30, 0x7e, 0x00, 0x3c, 0x66, 0x06, 0x1c, 0x06, 0x66, 0x3c, 0x00,
0x0c, 0x1c, 0x3c, 0x6c, 0x7e, 0x0c, 0x0c, 0x00, 0x7e, 0x60, 0x7c, 0x06, 0x06, 0x66, 0x3c, 0x00,
0x1c, 0x30, 0x60, 0x7c, 0x66, 0x66, 0x3c, 0x00, 0x7e, 0x06, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x00,
0x3c, 0x66, 0x66, 0x3c, 0x66, 0x66, 0x3c, 0x00, 0x3c, 0x66, 0x66, 0x3e, 0x06, 0x0c, 0x38, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x18, 0x18, 0x30,
0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x7e, 0x00, 0x00, 0x00,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x3c, 0x66, 0x0c, 0x18, 0x18, 0x00, 0x18, 0x00,
0x3c, 0x66, 0x6e, 0x6a, 0x6e, 0x60, 0x3c, 0x00, 0x3c, 0x66, 0x66, 0x7e, 0x66, 0x66, 0x66, 0x00,
0x7c, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x7c, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x60, 0x66, 0x3c, 0x00,
0x78, 0x6c, 0x66, 0x66, 0x66, 0x6c, 0x78, 0x00, 0x7e, 0x60, 0x60, 0x7c, 0x60, 0x60, 0x7e, 0x00,
0x7e, 0x60, 0x60, 0x7c, 0x60, 0x60, 0x60, 0x00, 0x3c, 0x66, 0x60, 0x6e, 0x66, 0x66, 0x3c, 0x00,
0x66, 0x66, 0x66, 0x7e, 0x66, 0x66, 0x66, 0x00, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7e, 0x00,
0x3e, 0x0c, 0x0c, 0x0c, 0x0c, 0x6c, 0x38, 0x00, 0x66, 0x6c, 0x78, 0x70, 0x78, 0x6c, 0x66, 0x00,
0x60, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7e, 0x00, 0x63, 0x77, 0x7f, 0x6b, 0x6b, 0x63, 0x63, 0x00,
0x66, 0x66, 0x76, 0x7e, 0x6e, 0x66, 0x66, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00,
0x7c, 0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x6a, 0x6c, 0x36, 0x00,
0x7c, 0x66, 0x66, 0x7c, 0x6c, 0x66, 0x66, 0x00, 0x3c, 0x66, 0x60, 0x3c, 0x06, 0x66, 0x3c, 0x00,
0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x00, 0x63, 0x63, 0x6b, 0x6b, 0x7f, 0x77, 0x63, 0x00,
0x66, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0x66, 0x00, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x00,
0x7e, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x7e, 0x00, 0x7c, 0x60, 0x60, 0x60, 0x60, 0x60, 0x7c, 0x00,
0x00, 0x60, 0x30, 0x18, 0x0c, 0x06, 0x00, 0x00, 0x3e, 0x06, 0x06, 0x06, 0x06, 0x06, 0x3e, 0x00,
0x18, 0x3c, 0x66, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x1c, 0x36, 0x30, 0x7c, 0x30, 0x30, 0x7e, 0x00, 0x00, 0x00, 0x3c, 0x06, 0x3e, 0x66, 0x3e, 0x00,
0x60, 0x60, 0x7c, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x66, 0x3c, 0x00,
0x06, 0x06, 0x3e, 0x66, 0x66, 0x66, 0x3e, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x7e, 0x60, 0x3c, 0x00,
0x1c, 0x30, 0x30, 0x7c, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x3e, 0x66, 0x66, 0x3e, 0x06, 0x3c,
0x60, 0x60, 0x7c, 0x66, 0x66, 0x66, 0x66, 0x00, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x3c, 0x00,
0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x70, 0x60, 0x60, 0x66, 0x6c, 0x78, 0x6c, 0x66, 0x00,
0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x36, 0x7f, 0x6b, 0x6b, 0x63, 0x00,
0x00, 0x00, 0x7c, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x7c, 0x66, 0x66, 0x7c, 0x60, 0x60, 0x00, 0x00, 0x3e, 0x66, 0x66, 0x3e, 0x06, 0x07,
0x00, 0x00, 0x6c, 0x76, 0x60, 0x60, 0x60, 0x00, 0x00, 0x00, 0x3e, 0x60, 0x3c, 0x06, 0x7c, 0x00,
0x30, 0x30, 0x7c, 0x30, 0x30, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x3e, 0x00,
0x00, 0x00, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x63, 0x6b, 0x6b, 0x7f, 0x36, 0x00,
0x00, 0x00, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x3e, 0x06, 0x3c,
0x00, 0x00, 0x7e, 0x0c, 0x18, 0x30, 0x7e, 0x00, 0x0c, 0x18, 0x18, 0x70, 0x18, 0x18, 0x0c, 0x00,
0x18, 0x18, 0x18, 0x00, 0x18, 0x18, 0x18, 0x00, 0x30, 0x18, 0x18, 0x0e, 0x18, 0x18, 0x30, 0x00,
0x31, 0x6b, 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
]
from gameduino.registers import *
# BaseGameduino is the common base for the Gameduino objects in remote and sim
class BaseGameduino(object):
def coldstart(self):
self.wr(J1_RESET, 1)
self.fill(RAM_PIC, 0, 10 * 1024)
for i in range(512):
self.sprite(i, 400, 400, 0, 0, 0)
self.wr16(SPR_DISABLE, 0)
self.wr16(SPR_PAGE, 0)
self.wr16(JK_MODE, 0)
self.wr16(SCROLL_X, 0)
self.wr16(SCROLL_Y, 0)
self.wr16(BG_COLOR, 0)
self.wr16(SAMPLE_L, 0)
self.wr16(SAMPLE_R, 0)
self.wr16(SCREENSHOT_Y, 0)
def dump(self, a, l):
""" Dump ``l`` bytes memory starting at address ``a`` """
for i in range(0, l, 16):
d16 = self.rdstr(a + i, 16)
print "%04x %s" % (a + i, " ".join(["%02x" % ord(c) for c in d16]))
def wr(self, a, v):
""" Write a single byte ``v`` to address ``a``. """
self.wrstr(a, chr(v))
def fill(self, a, v, c):
""" Fill ``c`` bytes of memory at address ``a`` with value ``v`` """
self.wrstr(a, chr(v) * c)
def putstr(self, x, y, v):
""" Write string ``v`` at screen position (x,y) """
a = y * 64 + x
self.wrstr(a, v)
def rd16(self, a):
return struct.unpack("<H", self.rdstr(a, 2))[0]
def rd32(self, a):
return struct.unpack("<L", self.rdstr(a, 4))[0]
def wr16(self, a, v):
""" Write 16-bit value ``v`` at to address ``a`` """
self.wrstr(a, struct.pack("<H", v))
def wr32(self, a, v):
""" Write 32-bit value ``v`` at to address ``a`` """
self.wrstr(a, struct.pack("<L", v))
def setpal(self, pal, rgb):
        self.wr16(RAM_PAL + (pal << 1), rgb)
def ascii(self):
stretch = [
0x00, 0x03, 0x0c, 0x0f,
0x30, 0x33, 0x3c, 0x3f,
0xc0, 0xc3, 0xcc, 0xcf,
0xf0, 0xf3, 0xfc, 0xff ]
gstr = ""
for i in range(768):
b = ascii_glyphs[i]
h = stretch[b >> 4]
l = stretch[b & 0xf]
gstr += chr(h)
gstr += chr(l)
self.wrstr(0x1000 + (16 * ord(' ')), gstr)
for i in range(0x20, 0x80):
            self.setpal(4 * i + 0, TRANSPARENT)
            self.setpal(4 * i + 3, RGB(255, 255, 255))
        self.fill(RAM_PIC, ord(' '), 4096)
def voice(self, v, wave, freq, lamp, ramp = None):
"""
Set the state of a voice.
:param v: voice number 0-63
:type v: int
        :param wave: wave type, 0 for sine, 1 for noise
:type wave: int
:param freq: frequency control, in quarter-hertz
:type freq: int
:param lamp: left amplitude 0-255
:type lamp: int
        :param ramp: right amplitude 0-255, defaults to the same value as ``lamp``
:type ramp: int
"""
if ramp is None:
ramp = lamp
self.wr32(VOICES + (4 * v), freq | (wave << 15) | (lamp << 16) | (ramp << 24))
def silence(self):
""" Switch all voices off """
for i in range(64):
self.voice(i, 0, 4 * 440, 0, 0)
def copy(self, a, v):
self.wrstr(a, v)
def microcode(self, src):
"""
Halt coprocessor, load microprogram, restart coprocessor
        :param src: the microprogram, expressed as a string
:type src: string
The string is loaded into the Gameduino's microprogram area,
and can be up to 256 bytes. For example, to load the
:ref:`splitscreen` microprogram::
            splitscreen_code = open("splitscreen.binle", "rb").read()
gd.microcode(splitscreen_code)
"""
self.wr(J1_RESET, 1)
self.copy(J1_CODE, src)
self.wr(J1_RESET, 0)
def sprite(self, spr, x, y, image, palette, rot, jk = 0):
"""
Set the state of a hardware sprite
:param spr: sprite number 0-511
:param x: x coordinate
:param y: y coordinate
:param image: sprite source image 0-63
:param palette: sprite palette select, 0-15, see below
:param rot: sprite rotate control 0-7, see :ref:`rotate`
:param jk: collision class control, 0-1
Palette select controls the number of colors used for the sprite, the source palette, and which data bits
to use as source.
"""
self.wr32(RAM_SPR + (4 * spr),
(x & 511) | ((rot & 7) << 9) | ((palette & 15) << 12) | ((y & 511) << 16) | ((image & 63) << 25) | ((jk & 1) << 31))
def im(self):
"""
Return the current screen as a 400x300 RGB PIL Image::
>>> import gameduino.sim
>>> gd = gameduino.sim.Gameduino()
>>> gd.im().save("screenshot.png")
"""
return self._im()
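# Minimal usage sketch (not part of the library itself): the concrete
# subclasses in gameduino.sim / gameduino.remote supply wrstr/rdstr, so the
# constructor below is an assumption for illustration only.
#
#   gd = gameduino.sim.Gameduino()
#   gd.coldstart()
#   gd.ascii()
#   gd.putstr(10, 10, "HELLO")
#   gd.voice(0, 0, 4 * 440, 128)  # 440 Hz sine, since freq is in quarter-hertz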
| 49.081731 | 134 | 0.54687 | 4,680 | 0.458419 | 0 | 0 | 0 | 0 | 0 | 0 | 2,102 | 0.205897 |
eb1e1aaec21c57363587a62326c0cc891182c577 | 183 | py | Python | sdm/__init__.py | DarthNoxix/noxixcogs | 794571b7d155e40f6bfb6ba7c31b0a7f025e3d59 | ["MIT"] | null | null | null | sdm/__init__.py | DarthNoxix/noxixcogs | 794571b7d155e40f6bfb6ba7c31b0a7f025e3d59 | ["MIT"] | null | null | null | sdm/__init__.py | DarthNoxix/noxixcogs | 794571b7d155e40f6bfb6ba7c31b0a7f025e3d59 | ["MIT"] | null | null | null | from .sdm import Sdm
__red_end_user_data_statement__ = (
"This cog does not persistently store data or metadata about users."
)
async def setup(bot):
bot.add_cog(Sdm(bot))
| 18.3 | 72 | 0.73224 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.256831 | 68 | 0.371585 |
eb1e990c875a84c89463cedf50afc813143a16f2 | 1,330 | py | Python | GUI/WifiMonitor/UDP/Utils/gpio_mapping.py | gchinellato/XD | f6c0134030c5e229a7b9c2621311c5204aed77af | ["MIT"] | 1 | 2019-10-15T20:31:39.000Z | 2019-10-15T20:31:39.000Z | GUI/WifiMonitor/Utils/gpio_mapping.py | gchinellato/XD | f6c0134030c5e229a7b9c2621311c5204aed77af | ["MIT"] | null | null | null | GUI/WifiMonitor/Utils/gpio_mapping.py | gchinellato/XD | f6c0134030c5e229a7b9c2621311c5204aed77af | ["MIT"] | null | null | null | #!/usr/bin/python
"""
*************************************************
* @Project: Self Balance
* @Description: GPIO Mapping
* @Owner: Guilherme Chinellato
* @Email: [email protected]
*************************************************
"""
"""
#
#Arduino GPIO
#
4x encoder (INT0-D2, INT1-D3, D4, D7)
4x motor enable (D5, D6, D11, D12)
2x PWM (D9, D10)
2x I2C (SCL-A5, SDA-A4)
"""
'''
Deprecated (replaced to Arduino)
#
#Motors GPIOs
#
#Motor A & B PWM outputs (BCM pinout)
MA_PWM_GPIO = 19
MB_PWM_GPIO = 26
#Motor A & B enable outputs (BCM pinout)
MA_CLOCKWISE_GPIO = 5
MA_ANTICLOCKWISE_GPIO = 6
MB_CLOCKWISE_GPIO = 20
MB_ANTICLOCKWISE_GPIO = 21
#
#Encoders GPIOs
#
#Enconders 1 & 2 for each motor (BCM pinout)
MA_ENCODER_1 = 12
MA_ENCODER_2 = 13
MB_ENCODER_1 = 7
MB_ENCODER_2 = 8
'''
#
#PanTilt GPIOs
#
#MicroServo Vertical and Horizontal outputs (BCM pinout)
SERVO_V_GPIO = 18
SERVO_H_GPIO = 23
'''Servo mapping for servoblaster:
0 on P1-7 GPIO-4
1 on P1-11 GPIO-17
*2 on P1-12 GPIO-18*
3 on P1-13 GPIO-27
4 on P1-15 GPIO-22
*5 on P1-16 GPIO-23*
6 on P1-18 GPIO-24
7 on P1-22 GPIO-25'''
#Servo pins
SERVO_H = '2' #pin 12 BCM 18
SERVO_V = '5' #pin 16 BCM 23
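# Minimal sketch (assumes the ServoBlaster daemon is running and exposing
# /dev/servoblaster; positions are in its default 10 us units):
# with open('/dev/servoblaster', 'w') as sb:
#     sb.write(SERVO_H + '=150\n')  # ~1.5 ms pulse, roughly centre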
| 18.472222 | 69 | 0.566165 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,252 | 0.941353 |
eb1fb044cf839bde93fe0d603ce5bba8c4e8cccd | 449 | py | Python | vks/vulkanglobals.py | geehalel/pyvk | 56737ee4547b3f12bf941dcda74305b739d09cbb | ["MIT"] | 1 | 2022-01-09T19:02:00.000Z | 2022-01-09T19:02:00.000Z | vks/vulkanglobals.py | geehalel/pyvk | 56737ee4547b3f12bf941dcda74305b739d09cbb | ["MIT"] | null | null | null | vks/vulkanglobals.py | geehalel/pyvk | 56737ee4547b3f12bf941dcda74305b739d09cbb | ["MIT"] | null | null | null | # Copyright (C) 2019 by [email protected]
# This code is licensed under the MIT license (MIT) (http://opensource.org/licenses/MIT)
import platform
_WIN32 = (platform.system() == 'Windows')
VK_USE_PLATFORM_WIN32_KHR = _WIN32
VK_USE_PLATFORM_ANDROID_KHR = False
VK_USE_PLATFORM_WAYLAND_KHR = False
_DIRECT2DISPLAY = False
#VK_USE_PLATFORM_XCB_KHR = True
VK_USE_PLATFORM_XCB_KHR = not VK_USE_PLATFORM_WIN32_KHR
DEFAULT_FENCE_TIMEOUT = 100000000000
| 29.933333 | 88 | 0.815145 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.378619 |
eb20be04422ba85fc708db252613db55adc1f7a9 | 359 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/scripts/vulture/whitelist.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/scripts/vulture/whitelist.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/scripts/vulture/whitelist.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | ["MIT"] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | #!/bin/env python
# Vulture often detects false positives when analyzing a code
# base. If there are particular things you wish to ignore,
# add them below. This file is consumed by
# scripts/dead_code/find-dead-code.sh
from vulture.whitelist_utils import Whitelist
view_whitelist = Whitelist()
# Example:
# view_whitelist.name_of_function_to_whitelist
| 23.933333 | 61 | 0.793872 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 271 | 0.754875 |
eb212bcaed139e5c9db595186ee8e16677921512 | 8,088 | py | Python | mmdet/utils/memory.py | Youth-Got/mmdetection | 2e0a02599804da6e07650dde37b9df538e15d646 | ["Apache-2.0"] | 1 | 2021-12-10T15:08:22.000Z | 2021-12-10T15:08:22.000Z | mmdet/utils/memory.py | q3394101/mmdetection | ca11860f4f3c3ca2ce8340e2686eeaec05b29111 | ["Apache-2.0"] | null | null | null | mmdet/utils/memory.py | q3394101/mmdetection | ca11860f4f3c3ca2ce8340e2686eeaec05b29111 | ["Apache-2.0"] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import abc
from contextlib import contextmanager
from functools import wraps
import torch
from mmdet.utils import get_root_logger
def cast_tensor_type(inputs, src_type=None, dst_type=None):
"""Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.
Args:
inputs: Inputs that to be casted.
src_type (torch.dtype | torch.device): Source type.
        dst_type (torch.dtype | torch.device): Destination type.
Returns:
The same type with inputs, but all contained Tensors have been cast.
"""
assert dst_type is not None
if isinstance(inputs, torch.Tensor):
if isinstance(dst_type, torch.device):
# convert Tensor to dst_device
if hasattr(inputs, 'to') and \
hasattr(inputs, 'device') and \
(inputs.device == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
else:
# convert Tensor to dst_dtype
if hasattr(inputs, 'to') and \
hasattr(inputs, 'dtype') and \
(inputs.dtype == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
# we need to ensure that the type of inputs to be casted are the same
# as the argument `src_type`.
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type=src_type, dst_type=dst_type)
for item in inputs)
# TODO: Currently not supported
# elif isinstance(inputs, InstanceData):
# for key, value in inputs.items():
# inputs[key] = cast_tensor_type(
# value, src_type=src_type, dst_type=dst_type)
# return inputs
else:
return inputs
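# Minimal usage sketch (the tensors below are hypothetical, not part of this
# module): casting a nested container to FP16 and to CPU.
# batch = {'img': torch.rand(2, 3), 'metas': [torch.rand(2)]}
# half_batch = cast_tensor_type(batch, dst_type=torch.half)
# cpu_batch = cast_tensor_type(batch, dst_type=torch.device('cpu'))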
@contextmanager
def _ignore_torch_cuda_oom():
"""A context which ignores CUDA OOM exception from pytorch.
Code is modified from
<https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py> # noqa: E501
"""
try:
yield
except RuntimeError as e:
# NOTE: the string may change?
if 'CUDA out of memory. ' in str(e):
pass
else:
raise
class AvoidOOM:
"""Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of
Memory error. It will do the following steps:
1. First retry after calling `torch.cuda.empty_cache()`.
2. If that still fails, it will then retry by converting inputs
to FP16.
3. If that still fails trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to
CPU implementation.
Args:
to_cpu (bool): Whether to convert outputs to CPU if get an OOM
error. This will slow down the code significantly.
Defaults to True.
test (bool): Skip `_ignore_torch_cuda_oom` operate that can use
lightweight data in unit test, only used in
test unit. Defaults to False.
Examples:
>>> from mmdet.utils.memory import AvoidOOM
>>> AvoidCUDAOOM = AvoidOOM()
>>> output = AvoidOOM.retry_if_cuda_oom(
>>> some_torch_function)(input1, input2)
>>> # To use as a decorator
>>> # from mmdet.utils import AvoidCUDAOOM
>>> @AvoidCUDAOOM.retry_if_cuda_oom
>>> def function(*args, **kwargs):
>>> return None
Note:
1. The output may be on CPU even if inputs are on GPU. Processing
on CPU will slow down the code significantly.
2. When converting inputs to CPU, it will only look at each argument
and check if it has `.device` and `.to` for conversion. Nested
structures of tensors are not supported.
3. Since the function might be called more than once, it has to be
stateless.
"""
def __init__(self, to_cpu=True, test=False):
self.to_cpu = to_cpu
self.test = test
def retry_if_cuda_oom(self, func):
"""Makes a function retry itself after encountering pytorch's CUDA OOM
error.
        The implementation logic follows
https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py
Args:
func: a stateless callable that takes tensor-like objects
as arguments.
Returns:
func: a callable which retries `func` if OOM is encountered.
""" # noqa: W605
@wraps(func)
def wrapped(*args, **kwargs):
# raw function
if not self.test:
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# get the type and device of first tensor
dtype, device = None, None
values = args + tuple(kwargs.values())
for value in values:
if isinstance(value, torch.Tensor):
dtype = value.dtype
device = value.device
break
if dtype is None or device is None:
raise ValueError('There is no tensor in the inputs, '
'cannot get dtype and device.')
# Convert to FP16
fp16_args = cast_tensor_type(args, dst_type=torch.half)
fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)
logger = get_root_logger()
logger.warning(f'Attempting to copy inputs of {str(func)} '
'to FP16 due to CUDA OOM')
# get input tensor type, the output type will same as
# the first parameter type.
with _ignore_torch_cuda_oom():
output = func(*fp16_args, **fp16_kwargs)
output = cast_tensor_type(
output, src_type=torch.half, dst_type=dtype)
if not self.test:
return output
logger.warning('Using FP16 still meet CUDA OOM')
# Try on CPU. This will slow down the code significantly,
# therefore print a notice.
if self.to_cpu:
logger.warning(f'Attempting to copy inputs of {str(func)} '
'to CPU due to CUDA OOM')
cpu_device = torch.empty(0).device
cpu_args = cast_tensor_type(args, dst_type=cpu_device)
cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)
# convert outputs to GPU
with _ignore_torch_cuda_oom():
logger.warning(f'Convert outputs to GPU (device={device})')
output = func(*cpu_args, **cpu_kwargs)
output = cast_tensor_type(
output, src_type=cpu_device, dst_type=device)
return output
                warnings.warn('Cannot convert output to GPU due to CUDA OOM, '
                              'the output is now on CPU, which might cause '
                              'errors if the output needs to interact with GPU '
                              'data in subsequent operations')
logger.warning('Cannot convert output to GPU due to '
'CUDA OOM, the output is on CPU now.')
return func(*cpu_args, **cpu_kwargs)
else:
# may still get CUDA OOM error
return func(*args, **kwargs)
return wrapped
# To use AvoidOOM as a decorator
AvoidCUDAOOM = AvoidOOM()
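# Minimal sketch of the ready-made instance above used as a decorator
# (the matmul workload is hypothetical):
# @AvoidCUDAOOM.retry_if_cuda_oom
# def big_matmul(a, b):
#     return a @ b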
| 37.794393 | 103 | 0.574679 | 5,451 | 0.673961 | 418 | 0.051682 | 3,569 | 0.441271 | 0 | 0 | 4,035 | 0.498887 |
eb213849d6f5cbf00a64871c3293e7fb777f9ff4 | 2,278 | py | Python | game.py | YeonjuKim05/Kim_Y_RPS_Fall2020 | 031bfeec09f663686ae2c9418185ab5070af3b7a | ["MIT"] | null | null | null | game.py | YeonjuKim05/Kim_Y_RPS_Fall2020 | 031bfeec09f663686ae2c9418185ab5070af3b7a | ["MIT"] | 1 | 2020-11-28T16:29:28.000Z | 2020-11-28T16:29:28.000Z | game.py | YeonjuKim05/Kim_Y_RPS_Fall2020 | 031bfeec09f663686ae2c9418185ab5070af3b7a | ["MIT"] | 5 | 2020-11-28T13:13:15.000Z | 2020-12-07T16:32:36.000Z | # import packages to extend python (just like we extend sublime, or Atom, or VSCode)
from random import randint
from gameComponents import gameVars, chooseWinner
while gameVars.player is False:
print("=======================*/ RPS CONTEST /*=======================")
print("Computer Lives: ", gameVars.ai_lives, "/", gameVars.total_lives)
print("Player Lives: ", gameVars.player_lives, "/", gameVars.total_lives)
print("==============================================")
print("Choose your weapon! or type quit to leave\n")
gameVars.player = input("Choose rock, paper or scissors: \n")
# if the player chose to quit then exit the game
if gameVars.player == "quit":
print("You chose to quit")
exit()
#player = True -> it has a value (rock, paper, or scissors)
# this will be the AI choice -> a random pick from the choices array
computer = gameVars.choices[randint(0, 2)]
# check to see what the user input
# print outputs whatever is in the round brackets -> in this case it outputs player to the command prompt window
print("user chose: " + gameVars.player)
# validate that the random choice worked for the AI
print("AI chose: " + computer)
#--------------------------- MOVE THIS CHUNK OF CODE TO A PACKAGE - START HERE --------------------
if (computer == gameVars.player):
print("tie")
# always check for negative conditions first (the losing case)
elif (computer == "rock"):
if (gameVars.player == "scissors"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
elif (computer == "paper"):
if (gameVars.player == "rock"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
elif (computer == "scissors"):
if (gameVars.player == "paper"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
#--------------------------- stop here - all of the above needs to move -----------------------
    if gameVars.player_lives == 0:
chooseWinner.winorlose("lost")
    if gameVars.ai_lives == 0:
chooseWinner.winorlose("won")
print("Player has", gameVars.player_lives, "lives left")
print("AI has", gameVars.ai_lives, "lives left")
gameVars.player = False
| 26.183908 | 113 | 0.6295 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,163 | 0.510536 |
eb21b87b5bc6c350c9c4db10e19ca1430b1bd7c2 | 1,227 | py | Python | dataset/utils.py | tarun-bisht/mlpipe | 0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1 | ["MIT"] | null | null | null | dataset/utils.py | tarun-bisht/mlpipe | 0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1 | ["MIT"] | null | null | null | dataset/utils.py | tarun-bisht/mlpipe | 0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1 | ["MIT"] | null | null | null | import pandas as pd
import os
def df_from_image_dirs(directory, image_format="jpg",
relative_path=False, verbose=0):
dataframe_dict = {
"images":[],
"classes":[]
}
num_dirs = 0
num_images = 0
images_per_classes = []
classes = []
for dirs in os.listdir(directory):
dir_path = os.path.join(directory,dirs)
if os.path.isdir(dir_path):
            # Use the last dot-separated component so names with extra dots still match.
            files = [f for f in os.listdir(dir_path) if f.split(".")[-1] == image_format]
num = len(files)
if relative_path:
dataframe_dict["images"] = dataframe_dict["images"]+[os.path.join(dir_path,f) for f in files]
else:
dataframe_dict["images"] = dataframe_dict["images"]+files
dataframe_dict["classes"] = dataframe_dict["classes"]+[dirs]*num
num_images+=num
images_per_classes.append(num)
classes.append(dirs)
num_dirs+=1
if verbose:
print("number of directories(classes)= ",num_dirs)
print("total number of images= ",num_images)
for clss, imgs in zip(classes, images_per_classes):
print(f"{clss} : {imgs}")
return pd.DataFrame.from_dict(dataframe_dict) | 36.088235 | 109 | 0.597392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.124694 |
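# Minimal usage sketch for df_from_image_dirs above (directory layout is
# hypothetical: one sub-directory per class, each holding .jpg images):
# df = df_from_image_dirs("data/train", image_format="jpg",
#                         relative_path=True, verbose=1)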
eb2259b4263e5697783bf6849627924369449a0f | 1,222 | py | Python | THreading.py | asd86826/OpticalFlow_Test | f4d621994871b4913b95a18f59cb171526d786ae | ["MIT"] | null | null | null | THreading.py | asd86826/OpticalFlow_Test | f4d621994871b4913b95a18f59cb171526d786ae | ["MIT"] | null | null | null | THreading.py | asd86826/OpticalFlow_Test | f4d621994871b4913b95a18f59cb171526d786ae | ["MIT"] | null | null | null | import time
from threading import Timer
i = 0
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
        self.start()  # if you don't want auto-start, delete this line
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
def timeTest():
global i
i = i+1
print ("Hello %d!" % i)
if __name__ == "__main__":
print("Starting...")
rt = RepeatedTimer(0.05, timeTest) # it auto start ,so dont need rt.start()
try:
ST = time.time()
time.sleep(5)
except Exception as e:
raise e
finally:
rt.stop()
print(time.time() - ST)
| 24.44 | 85 | 0.531097 | 778 | 0.636661 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.094926 |
eb22d571bce236b4e4b07269afd4c1273f92107f | 721 | py | Python | src/main/PyCodes/deep_versions.py | panditu2015/DL-Lab-7th-Semester | 59a64d9c219cbed8cc4a75517f46c7f551a95a5a | ["MIT"] | null | null | null | src/main/PyCodes/deep_versions.py | panditu2015/DL-Lab-7th-Semester | 59a64d9c219cbed8cc4a75517f46c7f551a95a5a | ["MIT"] | null | null | null | src/main/PyCodes/deep_versions.py | panditu2015/DL-Lab-7th-Semester | 59a64d9c219cbed8cc4a75517f46c7f551a95a5a | ["MIT"] | 1 | 2022-02-23T07:12:23.000Z | 2022-02-23T07:12:23.000Z |
# coding: utf-8
# In[1]:
import keras
# In[2]:
# scipy
import scipy
print( ' scipy: %s ' % scipy.__version__)
# numpy
import numpy
print( ' numpy: %s ' % numpy.__version__)
# matplotlib
import matplotlib
print( ' matplotlib: %s ' % matplotlib.__version__)
# pandas
import pandas
print( ' pandas: %s ' % pandas.__version__)
# statsmodels
import statsmodels
print( ' statsmodels: %s ' % statsmodels.__version__)
# scikit-learn
import sklearn
print( ' sklearn: %s ' % sklearn.__version__)
# In[3]:
# theano
import theano
print( ' theano: %s ' % theano.__version__)
# tensorflow
import tensorflow
print( ' tensorflow: %s ' % tensorflow.__version__)
# keras
import keras
print( ' keras: %s ' % keras.__version__)
| 15.673913 | 53 | 0.694868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 264 | 0.366158 |
eb2601a12ac399bfb0e416993c3a1b51cb79ad73 | 577 | py | Python | graph_help/colorschemes/DarkColorScheme.py | jgurhem/Graph_Generator | d60f4451feef0c530389bfc4bc6978bda3d4c0cb | ["MIT"] | null | null | null | graph_help/colorschemes/DarkColorScheme.py | jgurhem/Graph_Generator | d60f4451feef0c530389bfc4bc6978bda3d4c0cb | ["MIT"] | null | null | null | graph_help/colorschemes/DarkColorScheme.py | jgurhem/Graph_Generator | d60f4451feef0c530389bfc4bc6978bda3d4c0cb | ["MIT"] | null | null | null | from .DefaultColorScheme import DefaultColorScheme
class DarkColorScheme(DefaultColorScheme):
def __init__(self):
self.colors = dict()
self.colors['background'] = 'black'
self.colors['edge'] = 'white'
self.colors['fontcolor'] = 'black'
self.colors['initv'] = 'grey65'
self.colors['initm'] = 'grey65'
self.colors['inv'] = 'red'
self.colors['pmv'] = 'magenta'
self.colors['pmm1'] = 'blue'
self.colors['pmm2'] = 'blue'
self.colors['pmm_d'] = 'darkgreen'
self.colors['pmv_d'] = 'darkolivegreen3'
self.colors['sls'] = 'cyan3'
| 30.368421 | 50 | 0.636049 | 523 | 0.906412 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.315425 |
eb266bf3b2f0517ce3d9501b3cfc011f8ded2d3e | 3,817 | bzl | Python | defs.bzl | attilaolah/bazel-tools | 823216936ee93ab6884c6111a8e60e9a836fa7cc | ["Apache-2.0"] | 2 | 2021-09-02T18:59:09.000Z | 2021-09-20T23:13:17.000Z | defs.bzl | attilaolah/bazel-tools | 823216936ee93ab6884c6111a8e60e9a836fa7cc | ["Apache-2.0"] | null | null | null | defs.bzl | attilaolah/bazel-tools | 823216936ee93ab6884c6111a8e60e9a836fa7cc | ["Apache-2.0"] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_skylib//lib:shell.bzl", "shell")
def _json_extract_impl(ctx):
flags = list(ctx.attr.flags)
if ctx.attr.raw:
flags += ["-r"]
outputs = []
for src in ctx.files.srcs:
parts = [ctx.executable._jq.path] + flags
parts += [shell.quote(ctx.attr.query), shell.quote(src.path)]
basename, _, _ = src.basename.rpartition(".json")
output = ctx.actions.declare_file(basename + ctx.attr.suffix)
outputs.append(output)
parts += [">", shell.quote(output.path), "\n"]
cmd = " ".join([part for part in parts if part])
        # Using run() would be much nicer, but jq insists on writing to stdout.
ctx.actions.run_shell(
inputs = [src],
outputs = [output],
progress_message = "Executing jq for {}".format(src.short_path),
tools = [ctx.executable._jq],
command = cmd,
)
return [DefaultInfo(
runfiles = ctx.runfiles(files = outputs),
)]
json_extract = rule(
implementation = _json_extract_impl,
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = [".json"],
doc = "List of inputs. Must all be valid JSON files.",
),
"suffix": attr.string(
default = "",
doc = ("Output file extensions. Each input file will be renamed " +
"from basename.json to basename+suffix."),
),
"raw": attr.bool(
default = False,
doc = ("Whether or not to pass -r to jq. Passing -r will result " +
"in raw data being extracted, i.e. non-JSQN output."),
),
"query": attr.string(
default = ".",
doc = ("Query to pass to the jq binary. The default is '.', " +
"meaning just copy the validated input."),
),
"flags": attr.string_list(
allow_empty = True,
doc = "List of flags to pass to the jq binary.",
),
"_jq": attr.label(
executable = True,
cfg = "host",
default = Label("@jq"),
),
},
)
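# Minimal BUILD usage sketch for json_extract (file and target names are
# hypothetical): extract raw names from each input into basename.txt files.
#
# json_extract(
#     name = "names",
#     srcs = ["data.json"],
#     query = ".name",
#     raw = True,
#     suffix = ".txt",
# )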
def _json_test_impl(ctx):
inputs = [f.path for f in ctx.files.srcs]
parts = [ctx.executable._jq.short_path, "."] + inputs
parts += [">", "/dev/null"] # silence jq, only show errors
cmd = " ".join([part for part in parts if part])
# Write the file that will be executed by 'bazel test'.
ctx.actions.write(
output = ctx.outputs.test,
content = cmd,
)
return [DefaultInfo(
executable = ctx.outputs.test,
runfiles = ctx.runfiles(files = [
ctx.executable._jq,
] + ctx.files.srcs),
)]
json_test = rule(
implementation = _json_test_impl,
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = [".json"],
doc = ("List of inputs. The test will verify that they are " +
"valid JSON files."),
),
"_jq": attr.label(
executable = True,
cfg = "host",
default = Label("@jq"),
),
},
outputs = {"test": "%{name}.sh"},
test = True,
)
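# Minimal BUILD usage sketch for json_test (names are hypothetical):
#
# json_test(
#     name = "data_json_test",
#     srcs = ["data.json"],
# )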
| 31.545455 | 79 | 0.556196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,385 | 0.36285 |
eb26e6350d60cf3d97e04c6da4b6ad1b56768020 | 554 | py | Python | Psi_Phi/plot.py | Twinstar2/Phython_scripts | 19f88420bca64014585e87747d01737afe074400 | ["MIT"] | null | null | null | Psi_Phi/plot.py | Twinstar2/Phython_scripts | 19f88420bca64014585e87747d01737afe074400 | ["MIT"] | 1 | 2018-02-14T15:19:07.000Z | 2018-02-14T15:19:07.000Z | Psi_Phi/plot.py | TobiasJu/Python_Master_scripts | 19f88420bca64014585e87747d01737afe074400 | ["MIT"] | null | null | null | import matplotlib.pyplot as plt
plt.switch_backend('agg')
import seaborn as sns
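# NOTE: psi, phi (arrays of dihedral angles) and pfam (the family name used
# below) are assumed to be defined by the surrounding analysis code; this
# file is only the plotting fragment.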
sns_plot = \
(sns.jointplot(psi, phi, size=12, space=0, xlim=(-190, 190), ylim=(-190, 190)).plot_joint(sns.kdeplot, zorder=0,
n_levels=6))
# sns_plot = sns.jointplot(psi_list_numpy, phi_list_numpy, kind="hex", color="#4CB391") # stat_func=kendalltau
# sns_plot.ylim(-180, 180)
print "plotting: ", pfam
sns_plot.savefig("Ramachandranplot_scatter/ramachandranplot_" + pfam + ".png") | 39.571429 | 112 | 0.617329 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 204 | 0.368231 |
eb289039ceb1e6cb9ff0bbb176aa1f763781e163 | 692 | py | Python | tests/test_instrumentation/test_base.py | cloudchacho/hedwig-python | 1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b | ["Apache-2.0"] | null | null | null | tests/test_instrumentation/test_base.py | cloudchacho/hedwig-python | 1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b | ["Apache-2.0"] | 3 | 2021-06-25T20:52:50.000Z | 2021-11-30T16:22:30.000Z | tests/test_instrumentation/test_base.py | cloudchacho/hedwig-python | 1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b | ["Apache-2.0"] | null | null | null | from unittest import mock
import pytest
# pytest.importorskip takes a module name, so import opentelemetry.trace and
# use its get_tracer attribute below.
trace = pytest.importorskip('opentelemetry.trace')
@mock.patch('hedwig.backends.base.Message.exec_callback', autospec=True)
def test_message_handler_updates_span_name(mock_exec_callback, message, consumer_backend):
provider_metadata = mock.Mock()
    tracer = trace.get_tracer(__name__)
with tracer.start_as_current_span(test_message_handler_updates_span_name.__name__, {}) as span:
assert span.name == test_message_handler_updates_span_name.__name__
consumer_backend.message_handler(*message.serialize(), provider_metadata)
assert span.name == message.type
assert span.get_span_context().is_valid
| 40.705882 | 99 | 0.789017 | 0 | 0 | 0 | 0 | 580 | 0.83815 | 0 | 0 | 76 | 0.109827 |
eb2a05506a2d5dac21a3a7230d334f572006e5b5 | 42 | py | Python | logic/start_game.py | sparkingdark/Project | fdd521407d788d1945275148337992a795ebdf0c | ["MIT"] | null | null | null | logic/start_game.py | sparkingdark/Project | fdd521407d788d1945275148337992a795ebdf0c | ["MIT"] | null | null | null | logic/start_game.py | sparkingdark/Project | fdd521407d788d1945275148337992a795ebdf0c | ["MIT"] | 5 | 2020-11-28T13:13:15.000Z | 2020-12-07T16:32:36.000Z | from logic import *
def game():
pass | 8.4 | 19 | 0.619048 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
eb2a6dfadfc03cbe4b08fd33a47e0c0b3e370224 | 1,184 | py | Python | Leetcode/SwapNodesInPairs.py | tswsxk/CodeBook | 01b976418d64f5f94257ae0e2b36751afb93c105 | ["MIT"] | null | null | null | Leetcode/SwapNodesInPairs.py | tswsxk/CodeBook | 01b976418d64f5f94257ae0e2b36751afb93c105 | ["MIT"] | 1 | 2019-09-24T22:04:03.000Z | 2019-09-24T22:04:03.000Z | Leetcode/SwapNodesInPairs.py | tswsxk/CodeBook | 01b976418d64f5f94257ae0e2b36751afb93c105 | ["MIT"] | null | null | null | # Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
        nodeRec = []        # buffer holding the nodes of the current group
        check = head        # scan pointer
        precheck = head     # initially the head; later the tail of the previous swapped group
        count = 0
        n = 2               # group size; n = 2 swaps adjacent pairs
        while check:
            nodeRec.append(check)
            count += 1
            if count == n:
                # A full group is buffered: reverse its links in place.
                count = 0
                check = check.next
                for i, x in enumerate(nodeRec):
                    if i > 0:
                        x.next = nodeRec[i - 1]
                    else:
                        # The group's first node becomes its tail and points
                        # at the remainder of the list.
                        x.next = check
                if nodeRec[0] == head:
                    head = nodeRec[n - 1]
                else:
                    # Link the previous group's tail to this group's new head.
                    precheck.next = nodeRec[n - 1]
                precheck = nodeRec[0]
                nodeRec = []
                continue
            check = check.next
        return head
def initlist(listnum):
head = ListNode(listnum[0])
tail = head
for num in listnum[1:]:
tail.next = ListNode(num)
tail = tail.next
return head
if __name__ == "__main__":
sol = Solution()
    head = sol.swapPairs(initlist([1, 2, 3, 4]))
    # Print the swapped list; expected order: 2 1 4 3
    while head:
        print(head.val)
        head = head.next
 | 24.163265 | 44 | 0.47973 | 902 | 0.761824 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.097128 |
eb2b0a445ecc0e541307b4aff935b22d4cc3183d | 939 | py | Python | hello.py | ookcode/CodingSpider | eac57ef8b41be841a8366f3cc376ff259d01e27f | ["MIT"] | null | null | null | hello.py | ookcode/CodingSpider | eac57ef8b41be841a8366f3cc376ff259d01e27f | ["MIT"] | null | null | null | hello.py | ookcode/CodingSpider | eac57ef8b41be841a8366f3cc376ff259d01e27f | ["MIT"] | 1 | 2022-02-23T07:12:23.000Z | 2022-02-23T07:12:23.000Z | #!/usr/bin/python
#coding=utf-8
import os
import shlex
from flask import Flask
from flask import Response
from flask import request
app = Flask(__name__)
@app.route('/')
def root():
return app.send_static_file('index.html')
@app.route('/env')
def env():
html = "System Environment:\n\n"
for env in os.environ.keys():
html += env + ': ' + os.environ[env] + "\n"
return Response(html, mimetype='text/plain')
@app.route('/spider', methods=['GET', 'POST'])
def spider():
    if 'username' in request.args and 'password' in request.args:
        username = request.args['username']
        password = request.args['password']
        # Quote the user-supplied values so they cannot inject extra shell commands.
        cmd = "python coding_spider.py -u {} -p {}".format(
            shlex.quote(username), shlex.quote(password))
        output = os.popen(cmd)
        return Response(output.read(), mimetype='text/plain')
    else:
        return Response('error params, please input username and password', mimetype='text/plain')
if __name__ == "__main__":
app.run() | 30.290323 | 97 | 0.652822 | 0 | 0 | 0 | 0 | 751 | 0.799787 | 0 | 0 | 276 | 0.29393 |
eb2c8b8b8d777e9a0438515ac0aea6cd01f5301b | 2,696 | py | Python | chess-board-0.2.0/chessboard/pieces.py | fshelobolin/irohbot | 4ad4c554ecff1e1005fbecf26ee097c387bf357d | ["MIT"] | null | null | null | chess-board-0.2.0/chessboard/pieces.py | fshelobolin/irohbot | 4ad4c554ecff1e1005fbecf26ee097c387bf357d | ["MIT"] | null | null | null | chess-board-0.2.0/chessboard/pieces.py | fshelobolin/irohbot | 4ad4c554ecff1e1005fbecf26ee097c387bf357d | ["MIT"] | null | null | null | """
Ahira Justice, ADEFOKUN
[email protected]
"""
import os
import pygame
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE_DIR = os.path.join(BASE_DIR, "images")
BLACK = "BLACK"
WHITE = "WHITE"
BISHOP = "BISHOP"
KING = "KING"
KNIGHT = "KNIGHT"
PAWN = "PAWN"
QUEEN = "QUEEN"
ROOK = "ROOK"
class Piece:
bBishop = pygame.image.load(os.path.join(IMAGE_DIR, "bB.png"))
bKing = pygame.image.load(os.path.join(IMAGE_DIR, "bK.png"))
bKnight = pygame.image.load(os.path.join(IMAGE_DIR, "bN.png"))
bPawn = pygame.image.load(os.path.join(IMAGE_DIR, "bP.png"))
bQueen = pygame.image.load(os.path.join(IMAGE_DIR, "bQ.png"))
bRook = pygame.image.load(os.path.join(IMAGE_DIR, "bR.png"))
wBishop = pygame.image.load(os.path.join(IMAGE_DIR, "wB.png"))
wKing = pygame.image.load(os.path.join(IMAGE_DIR, "wK.png"))
wKnight = pygame.image.load(os.path.join(IMAGE_DIR, "wN.png"))
wPawn = pygame.image.load(os.path.join(IMAGE_DIR, "wP.png"))
wQueen = pygame.image.load(os.path.join(IMAGE_DIR, "wQ.png"))
wRook = pygame.image.load(os.path.join(IMAGE_DIR, "wR.png"))
def __init__(self, color, piece, DISPLAYSURF):
self.position = None
self.sprite = None
self.DISPLAYSURF = DISPLAYSURF
self.color = color
self.piece = piece
self.setSprite()
def setPosition(self, position):
self.position = position
def setSprite(self):
if self.piece == BISHOP:
if self.color == BLACK:
self.sprite = Piece.bBishop
elif self.color == WHITE:
self.sprite = Piece.wBishop
elif self.piece == KING:
if self.color == BLACK:
self.sprite = Piece.bKing
elif self.color == WHITE:
self.sprite = Piece.wKing
        elif self.piece == KNIGHT:
            if self.color == BLACK:
                self.sprite = Piece.bKnight
            elif self.color == WHITE:
                self.sprite = Piece.wKnight
elif self.piece == PAWN:
if self.color == BLACK:
self.sprite = Piece.bPawn
elif self.color == WHITE:
self.sprite = Piece.wPawn
elif self.piece == QUEEN:
if self.color == BLACK:
self.sprite = Piece.bQueen
elif self.color == WHITE:
self.sprite = Piece.wQueen
elif self.piece == ROOK:
if self.color == BLACK:
self.sprite = Piece.bRook
elif self.color == WHITE:
self.sprite = Piece.wRook
def displayPiece(self):
self.DISPLAYSURF.blit(self.sprite, self.position)
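# Minimal usage sketch (assumes a pygame display surface already exists):
# surface = pygame.display.set_mode((640, 640))
# pawn = Piece(WHITE, PAWN, surface)
# pawn.setPosition((0, 0))
# pawn.displayPiece()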
| 29.304348 | 66 | 0.582715 | 2,376 | 0.881306 | 0 | 0 | 0 | 0 | 0 | 0 | 221 | 0.081973 |
eb2cab16d3d0736d863c283be6817d00ab5e890d | 3,993 | py | Python | stacks/XIAOMATECH/1.0/services/ROCKETMQ/package/scripts/namesrv.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | ["Apache-2.0"] | 3 | 2019-08-13T01:44:16.000Z | 2019-12-10T04:05:56.000Z | stacks/XIAOMATECH/1.0/services/ROCKETMQ/package/scripts/namesrv.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | ["Apache-2.0"] | null | null | null | stacks/XIAOMATECH/1.0/services/ROCKETMQ/package/scripts/namesrv.py | tvorogme/dataops | acfa21df42a20768c004c6630a064f4e38e280b2 | ["Apache-2.0"] | 7 | 2019-05-29T17:35:25.000Z | 2021-12-04T07:55:10.000Z | from resource_management.core.resources.system import Execute
from resource_management.libraries.script import Script
from resource_management.core.resources.system import Directory
from resource_management.core.resources.system import File
from resource_management.core.source import InlineTemplate
from resource_management.libraries.functions.check_process_status import check_process_status
import os
def install_rocketmq():
import params
Directory([
params.pid_dir, params.log_dir, params.conf_dir, params.store_commitlog, params.store_queue],
owner=params.rocketmq_user,
group=params.user_group,
mode=0755,
create_parents=True)
if not os.path.exists(Script.get_stack_root() + '/' + params.version_dir) or not os.path.exists(
params.install_dir):
Execute('rm -rf %s' % Script.get_stack_root() + '/' + params.version_dir)
Execute('rm -rf %s' % params.install_dir)
Execute('/bin/rm -f /tmp/' + params.filename)
Execute(
'wget ' + params.download_url + ' -O /tmp/' + params.filename,
user=params.rocketmq_user)
Execute('tar -zxvf /tmp/' + params.filename + ' -C ' + Script.get_stack_root())
Execute('ln -s ' + Script.get_stack_root() + '/' + params.version_dir + ' ' + params.install_dir)
Execute('rm -rf ' + params.install_dir + '/conf ')
Execute('ln -s ' + params.conf_dir + ' ' + params.install_dir + '/conf ')
Execute("echo 'export PATH=%s/bin:$PATH'>/etc/profile.d/rocketmq.sh" %
params.install_dir)
Execute('chown -R %s:%s %s/%s' %
(params.rocketmq_user, params.user_group, Script.get_stack_root(), params.version_dir))
Execute('chown -R %s:%s %s' % (params.rocketmq_user, params.user_group,
params.install_dir))
def config_rocketmq():
import params
File(
params.conf_dir + '/broker.conf',
content=InlineTemplate(params.broker_content),
mode=0755,
owner=params.rocketmq_user,
group=params.user_group)
File(
params.conf_dir + '/logback_broker.xml',
content=InlineTemplate(params.logback_broker_content),
mode=0755,
owner=params.rocketmq_user,
group=params.user_group)
File(
params.conf_dir + '/logback_namesrv.xml',
content=InlineTemplate(params.logback_namesrv_content),
mode=0755,
owner=params.rocketmq_user,
group=params.user_group)
File(
params.conf_dir + '/logback_tools.xml',
content=InlineTemplate(params.logback_tools_content),
mode=0755,
owner=params.rocketmq_user,
group=params.user_group)
File(
params.conf_dir + '/plain_acl.yml',
content=InlineTemplate(params.acl_content),
mode=0755,
owner=params.rocketmq_user,
group=params.user_group)
class Rocketmq(Script):
pid_file = '/var/run/mqnamesrv.pid'
def install(self, env):
import params
env.set_params(params)
self.install_packages(env)
install_rocketmq()
def configure(self, env):
import params
env.set_params(params)
config_rocketmq()
def stop(self, env):
import params
env.set_params(params)
Execute(params.install_dir + '/bin/mqshutdown namesrv')
def start(self, env):
import params
env.set_params(params)
install_rocketmq()
self.configure(env)
Execute('nohup ' + params.install_dir + '/bin/runserver.sh org.apache.rocketmq.namesrv.NamesrvStartup &')
Execute(
"echo `ps ax | grep -i 'org.apache.rocketmq.namesrv.NamesrvStartup' |grep java | grep -v grep | awk '{print $1}'` > " + self.pid_file)
def status(self, env):
import params
env.set_params(params)
check_process_status(self.pid_file)
if __name__ == "__main__":
Rocketmq().execute()
| 35.651786 | 146 | 0.643877 | 998 | 0.249937 | 0 | 0 | 0 | 0 | 0 | 0 | 582 | 0.145755 |
eb361ceecffd166eeb0b6b3ee13b8be48e6f4d86 | 819 | py | Python | setup.py | ktvng/cue | 5f31c8898f3bc53a18956220f609489cd2bbe590 | ["MIT"] | null | null | null | setup.py | ktvng/cue | 5f31c8898f3bc53a18956220f609489cd2bbe590 | ["MIT"] | null | null | null | setup.py | ktvng/cue | 5f31c8898f3bc53a18956220f609489cd2bbe590 | ["MIT"] | null | null | null | """Cue: Script Orchestration for Data Analysis
Cue lets you package your data analysis into simple actions which can be connected
into a dynamic data analysis pipeline with coverage over even complex data sets.
"""
DOCLINES = (__doc__ or '').split('\n')
from setuptools import find_packages, setup
setup(
name='py-cue',
    package_dir={'cue': 'cue'},
packages=find_packages(include=['cue']),
version='0.1.0',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
project_urls={
"Source Code": "https://github.com/ktvng/cue"
},
author='ktvng',
license='MIT',
python_requires='>=3.8',
install_requires=['pyyaml>=5.2'],
entry_points={
'console_scripts': {
'cue=cue.cli:run'
}
}
)
| 26.419355 | 85 | 0.616606 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 374 | 0.456654 |
eb3657629d59fdcbd7874c2822fc0707cfc70c45 | 1,689 | py | Python | tests/getz.py | deflax/steinvord | 709326ff219159a78f644c0adf3c5b224ed42804 | ["Zlib"] | 1 | 2021-06-02T19:51:26.000Z | 2021-06-02T19:51:26.000Z | tests/getz.py | deflax/steinvord | 709326ff219159a78f644c0adf3c5b224ed42804 | ["Zlib"] | null | null | null | tests/getz.py | deflax/steinvord | 709326ff219159a78f644c0adf3c5b224ed42804 | ["Zlib"] | null | null | null | #!/usr/bin/python3.2
#
# Zabbix API Python usage example
# Christoph Haas <[email protected]>
#
username=''
password='1'
hostgroup=''
item_name='system.cpu.load[,avg1]'
zabbix_url=''
import zabbix_api
import sys
# Connect to Zabbix server
z=zabbix_api.ZabbixAPI(server=zabbix_url)
z.login(user=username, password=password)
# Get hosts in the hostgroup
hostgroup = z.hostgroup.get(
{
'filter': { 'name':hostgroup },
'sortfield': 'name',
'sortorder': 'ASC',
'limit':2,
'select_hosts':'extend'
})
print(hostgroup[0])
print("\n")
# 'select_hosts' returns the host objects under the 'hosts' key of each group.
for host in hostgroup[0]['hosts']:
hostname = host['host']
print("Host:", hostname)
print("Host-ID:", host['hostid'])
item = z.item.get({
'output':'extend',
'hostids':host['hostid'],
'filter':{'key_':item_name}})
if item:
print(item[0]['lastvalue'])
print("Item-ID:", item[0]['itemid'])
# Get history
lastvalue = z.history.get({
'history': item[0]['value_type'],
'itemids': item[0]['itemid'],
'output': 'extend',
# Sort by timestamp from new to old
'sortfield':'clock',
'sortorder':'DESC',
# Get only the first (=newest) entry
'limit': 1,
})
# CAVEAT! The history.get function must be told which type the
# values are (float, text, etc.). The item.value_type contains
# the number that needs to be passed to history.get.
if lastvalue:
lastvalue = lastvalue[0]['value']
print("Last value:", lastvalue)
else:
print("No item....")
print("---------------------------")
| 23.788732 | 70 | 0.562463 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 795 | 0.470693 |
eb3b035d6a2b960bc0d338d7dd3785c2208f99f5 | 11,813 | py | Python | server.py | uanthwal/starter-snake-python | 6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417 | ["MIT"] | null | null | null | server.py | uanthwal/starter-snake-python | 6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417 | ["MIT"] | null | null | null | server.py | uanthwal/starter-snake-python | 6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417 | ["MIT"] | null | null | null | import copy
import math
import os
import random
import cherrypy
"""
This is a simple Battlesnake server written in Python.
For instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md
"""
class Battlesnake(object):
@cherrypy.expose
@cherrypy.tools.json_out()
def index(self):
# This function is called when you register your Battlesnake on play.battlesnake.com
# It controls your Battlesnake appearance and author permissions.
# TIP: If you open your Battlesnake URL in browser you should see this data
return {
"apiversion": "1",
"author": "", # TODO: Your Battlesnake Username
"color": "#B765CD", # TODO: Personalize
"head": "default", # TODO: Personalize
"tail": "default", # TODO: Personalize
}
@cherrypy.expose
@cherrypy.tools.json_in()
def start(self):
# This function is called everytime your snake is entered into a game.
# cherrypy.request.json contains information about the game that's about to be played.
data = cherrypy.request.json
print("START")
return "ok"
def get_head_radii_coordinates(self, head):
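        # Cells adjacent to the head: the four orthogonal neighbours plus two
        # diagonal cells; note only the (x+1, y+1) and (x-1, y-1) diagonals
        # are covered, not the anti-diagonal ones.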
top_btm_coordinates = [
{
'x': head['x'],
'y': head['y'] - 1
}
,
{
'x': head['x'],
'y': head['y'] + 1
}
]
left_right_coordinates = [
{
'x': head['x'] - 1,
'y': head['y']
}
,
{
'x': head['x'] + 1,
'y': head['y']
}
]
diagonal_coord = [
{
'x': head['x'] + 1,
'y': head['y'] + 1
}
,
{
'x': head['x'] - 1,
'y': head['y'] - 1
}
]
return top_btm_coordinates + left_right_coordinates + diagonal_coord
def get_distance_bw_2_points(self, p1, p2):
return math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))
def get_neighbours(self, data):
neighbours = []
min_dist = 9999999
min_dist_id = ""
for snek in data['board']['snakes']:
if snek['id'] != data['you']['id']:
p1 = [data['you']['head']['x'], data['you']['head']['y']]
p2 = [snek['head']['x'], snek['head']['y']]
dist = self.get_distance_bw_2_points(p1, p2)
if dist < min_dist:
min_dist_id = snek['id']
neigh_coord = self.get_head_radii_coordinates(data['you']['head'])
for snek_bdy_coord in snek['body']:
if snek_bdy_coord in neigh_coord:
neighbours.append(snek['id'])
break
if len(neighbours) == 0:
neighbours.append(min_dist_id)
return neighbours
def will_go_out_of_bounds(self, data, direction):
head = data['you']['head']
if direction == "up" and head['y'] == data['board']['height'] - 1:
return True
elif direction == "down" and head['y'] == 0:
return True
elif direction == "right" and head['x'] == data['board']['width'] - 1:
return True
elif direction == "left" and head['x'] == 0:
return True
return False
def will_collide_with_self(self, data, direction):
head = data['you']['head']
your_body = data['you']['body']
if direction == "up" and {
'x': head['x'],
'y': head['y'] + 1
} in your_body:
return True
elif direction == "down" and {
'x': head['x'],
'y': head['y'] - 1
} in your_body:
return True
elif direction == "right" and {
'x': head['x'] + 1,
'y': head['y']
} in your_body:
return True
elif direction == "left" and {
'x': head['x'] - 1,
'y': head['y']
} in your_body:
return True
return False
def will_hit_another_snake(self, data, direction, neighbours):
head = data['you']['head']
for snake in data['board']['snakes']:
res = True
if len(neighbours) > 0:
res = data['you']['id'] != snake['id'] and snake['id'] in neighbours
else:
res = data['you']['id'] != snake['id']
if res:
opponent_body = snake['body']
if direction == "up":
if {
'x': head['x'],
'y': head['y'] + 1
} in opponent_body:
return True
elif direction == "down":
if {
'x': head['x'],
'y': head['y'] - 1
} in opponent_body:
return True
elif direction == "right":
if {
'x': head['x'] + 1,
'y': head['y']
} in opponent_body:
return True
elif direction == "left":
if {
'x': head['x'] - 1,
'y': head['y']
} in opponent_body:
return True
return False
def get_safe_move_x_from_data(self, moves_data, data):
move = None
for key in moves_data:
will_hit_another_snake = moves_data[key]['will_hit_another_snake']
will_go_out_of_bounds = moves_data[key]['will_go_out_of_bounds']
will_hit_self = moves_data[key]['will_hit_self']
if not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self and \
self.check_if_move_is_safe(data, key):
move = key
break
        # If no move looks safe even after self.check_if_move_is_safe(data, key),
        # fall back to any move that avoids an immediate collision and leave survival to luck :D
if move is None:
for key in moves_data:
will_hit_another_snake = moves_data[key]['will_hit_another_snake']
will_go_out_of_bounds = moves_data[key]['will_go_out_of_bounds']
will_hit_self = moves_data[key]['will_hit_self']
if not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self:
move = key
break
return move
def should_eat_food(self, data):
if data['you']['health'] < 40:
return True
return False
def get_distance_to_food(self, food_pos, head):
return abs(food_pos['x'] - head['x']) + abs(food_pos['y'] - head['y'])
def find_nearest_food(self, data):
if len(data['board']['food']) == 0:
return None
nearest = data['board']['food'][0]
min_distance = self.get_distance_to_food(data['board']['food'][0], data['you']['head'])
for food in data['board']['food']:
current_distance = self.get_distance_to_food(food, data['you']['head'])
if min_distance > current_distance:
nearest = food
min_distance = current_distance
return nearest
def get_direction_to_eat(self, data, moves_data):
nearest_food = self.find_nearest_food(data)
if nearest_food is not None:
print(f"there is food at: {nearest_food}")
shouldGoUp = False
shouldGoRight = False
shouldGoLeft = False
shouldGoDown = False
if nearest_food['x'] > data['you']['head']['x']:
# need to move right
shouldGoRight = True
print("1")
elif nearest_food['x'] < data['you']['head']['x']:
# need to move left
shouldGoLeft = True
print("2")
if nearest_food['y'] > data['you']['head']['y']:
# need to move up
shouldGoUp = True
print("3")
elif nearest_food['y'] < data['you']['head']['y']:
# need to move down
shouldGoDown = True
print("4")
if shouldGoRight and self.can_go_in_direction(moves_data, data, "right"):
return "right"
elif shouldGoLeft and self.can_go_in_direction(moves_data, data, "left"):
return "left"
elif shouldGoUp and self.can_go_in_direction(moves_data, data, "up"):
return "up"
elif shouldGoDown and self.can_go_in_direction(moves_data, data, "down"):
return "down"
return None
def can_go_in_direction(self, moves_data, data, key):
can_go = False
will_hit_another_snake = moves_data[key]['will_hit_another_snake']
will_go_out_of_bounds = moves_data[key]['will_go_out_of_bounds']
will_hit_self = moves_data[key]['will_hit_self']
if not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self and \
self.check_if_move_is_safe(data, key):
can_go = True
if not can_go:
return not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self
return can_go
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def move(self):
# This function is called on every turn of a game. It's how your snake decides where to move.
# Valid moves are "up", "down", "left", or "right".
# TODO: Use the information in cherrypy.request.json to decide your next move.
data = cherrypy.request.json
print("data is:****************")
print(data)
print("data is:****************")
neighbours = self.get_neighbours(data)
possible_moves = ["up", "down", "left", "right"]
# random.shuffle(possible_moves)
# moves_data stores data for all 4 directions with their values for will_hit_another_snake and
# will_go_out_of_bounds
moves_data = {
"up": {}, "down": {}, "left": {}, "right": {}
}
for possible_move in possible_moves:
will_go_out_of_bounds = self.will_go_out_of_bounds(data, possible_move)
if not will_go_out_of_bounds:
will_hit_self = self.will_collide_with_self(data, possible_move)
will_hit_another_snake = self.will_hit_another_snake(
data, possible_move, neighbours)
moves_data[possible_move] = {
'will_hit_another_snake': will_hit_another_snake,
'will_hit_self': will_hit_self,
'will_go_out_of_bounds': will_go_out_of_bounds
}
else:
moves_data[possible_move] = {
'will_hit_another_snake': True,
'will_hit_self': True,
'will_go_out_of_bounds': will_go_out_of_bounds
}
move = None
# if self.should_eat_food(data):
# move = self.get_direction_to_eat(data, moves_data)
if move is None:
move = self.get_safe_move_x_from_data(moves_data,
data)
if move is None:
print("************* making a random move ****************")
move = random.choice(possible_moves)
print(f"MOVE: {move}")
return {"move": move}
    def check_if_move_is_safe(self, data, move):
        # Simulate our head's next position for the proposed move, then make
        # sure no rival snake's head is adjacent to that square -- a rival
        # head one step away could move onto it next turn and cause a
        # head-to-head collision.
        deltas = {"up": (0, 1), "down": (0, -1), "left": (-1, 0), "right": (1, 0)}
        head = data['you']['head']
        dx, dy = deltas[move]
        your_head_nxt_pos = {'x': head['x'] + dx, 'y': head['y'] + dy}
        # Every orthogonal neighbour of the next square, except the square we
        # are moving from, could hold a rival head.
        possible_heads = [
            {'x': your_head_nxt_pos['x'] + ddx, 'y': your_head_nxt_pos['y'] + ddy}
            for ddx, ddy in deltas.values()
            if (your_head_nxt_pos['x'] + ddx,
                your_head_nxt_pos['y'] + ddy) != (head['x'], head['y'])
        ]
        for snake in data['board']['snakes']:
            if snake['id'] != data['you']['id'] and snake['head'] in possible_heads:
                return False
        return True
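    # Worked example for the check above: with our head at (5, 5) and the
    # move "up", the next square is (5, 6); a rival head on (4, 6), (6, 6)
    # or (5, 7) could also reach (5, 6) next turn, so the move is rejected.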
@cherrypy.expose
@cherrypy.tools.json_in()
def end(self):
# This function is called when a game your snake was in ends.
# It's purely for informational purposes, you don't have to make any decisions here.
data = cherrypy.request.json
print("END")
return "ok"
if __name__ == "__main__":
server = Battlesnake()
cherrypy.config.update({"server.socket_host": "0.0.0.0"})
cherrypy.config.update({
"server.socket_port":
int(os.environ.get("PORT", "8080")),
})
print("Starting Battlesnake Server...")
cherrypy.quickstart(server)
| 31.501333 | 108 | 0.632439 | 11,314 | 0.957758 | 0 | 0 | 2,904 | 0.245831 | 0 | 0 | 3,187 | 0.269788 |
eb3bba063d98bf83051c3973141cbbea653626d3 | 342 | py | Python | EventIntegrityLib.py | fermi-lat/EventIntegrity | 600c64c7b9be57e1008d12b7bd28ef0d260d7973 | [
"BSD-3-Clause"
]
| null | null | null | EventIntegrityLib.py | fermi-lat/EventIntegrity | 600c64c7b9be57e1008d12b7bd28ef0d260d7973 | [
"BSD-3-Clause"
]
| null | null | null | EventIntegrityLib.py | fermi-lat/EventIntegrity | 600c64c7b9be57e1008d12b7bd28ef0d260d7973 | [
"BSD-3-Clause"
]
| null | null | null | # $Header: /nfs/slac/g/glast/ground/cvs/GlastRelease-scons/EventIntegrity/EventIntegrityLib.py,v 1.2 2008/08/28 21:50:54 ecephas Exp $
def generate(env, **kw):
if not kw.get('depsOnly', 0):
env.Tool('addLibrary', library = ['EventIntegrity'])
env.Tool('GlastSvcLib')
env.Tool('LdfEventLib')
def exists(env):
return 1;
| 38 | 134 | 0.681287 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 198 | 0.578947 |
eb3c0fe9fe75281912b7403d1e9af8679184f59d | 107 | py | Python | mr4mp/__init__.py | lapets/mr4mp | 3f3d6ec01272d4b450eda536b37bcd76851a57d2 | [
"MIT"
]
| 5 | 2019-06-28T17:36:37.000Z | 2022-03-08T18:59:01.000Z | mr4mp/__init__.py | lapets/mr4mp | 3f3d6ec01272d4b450eda536b37bcd76851a57d2 | [
"MIT"
]
| null | null | null | mr4mp/__init__.py | lapets/mr4mp | 3f3d6ec01272d4b450eda536b37bcd76851a57d2 | [
"MIT"
]
| null | null | null | """Gives users direct access to class and functions."""
from mr4mp.mr4mp import pool, mapreduce, mapconcat
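# Minimal usage sketch (assumes the usual mr4mp API; the map/reduce
# functions below are illustrative):
# result = pool().mapreduce(lambda x: {x: x * x}, lambda a, b: {**a, **b}, range(4))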
| 35.666667 | 55 | 0.775701 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.514019 |
eb3c1435400a880f8b3833ff6b37ef02c5237e11 | 59,098 | py | Python | google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
]
| 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
]
| 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
]
| 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.devtools.testing.v1',
manifest={
'OrchestratorOption',
'RoboActionType',
'InvalidMatrixDetails',
'TestState',
'OutcomeSummary',
'TestMatrix',
'TestExecution',
'TestSpecification',
'SystraceSetup',
'TestSetup',
'IosTestSetup',
'EnvironmentVariable',
'Account',
'GoogleAuto',
'Apk',
'AppBundle',
'DeviceFile',
'ObbFile',
'RegularFile',
'IosDeviceFile',
'AndroidTestLoop',
'IosXcTest',
'IosTestLoop',
'AndroidInstrumentationTest',
'AndroidRoboTest',
'RoboDirective',
'RoboStartingIntent',
'LauncherActivityIntent',
'StartActivityIntent',
'EnvironmentMatrix',
'AndroidDeviceList',
'IosDeviceList',
'AndroidMatrix',
'ClientInfo',
'ClientInfoDetail',
'ResultStorage',
'ToolResultsHistory',
'ToolResultsExecution',
'ToolResultsStep',
'GoogleCloudStorage',
'FileReference',
'Environment',
'AndroidDevice',
'IosDevice',
'TestDetails',
'InvalidRequestDetail',
'ShardingOption',
'UniformSharding',
'ManualSharding',
'TestTargetsForShard',
'Shard',
'CreateTestMatrixRequest',
'GetTestMatrixRequest',
'CancelTestMatrixRequest',
'CancelTestMatrixResponse',
},
)
class OrchestratorOption(proto.Enum):
r"""Specifies how to execute the test."""
ORCHESTRATOR_OPTION_UNSPECIFIED = 0
USE_ORCHESTRATOR = 1
DO_NOT_USE_ORCHESTRATOR = 2
class RoboActionType(proto.Enum):
r"""Actions which Robo can perform on UI elements."""
ACTION_TYPE_UNSPECIFIED = 0
SINGLE_CLICK = 1
ENTER_TEXT = 2
IGNORE = 3
class InvalidMatrixDetails(proto.Enum):
r"""The detailed reason that a Matrix was deemed INVALID."""
INVALID_MATRIX_DETAILS_UNSPECIFIED = 0
DETAILS_UNAVAILABLE = 1
MALFORMED_APK = 2
MALFORMED_TEST_APK = 3
NO_MANIFEST = 4
NO_PACKAGE_NAME = 5
INVALID_PACKAGE_NAME = 31
TEST_SAME_AS_APP = 6
NO_INSTRUMENTATION = 7
NO_SIGNATURE = 20
INSTRUMENTATION_ORCHESTRATOR_INCOMPATIBLE = 18
NO_TEST_RUNNER_CLASS = 19
NO_LAUNCHER_ACTIVITY = 8
FORBIDDEN_PERMISSIONS = 9
INVALID_ROBO_DIRECTIVES = 10
INVALID_RESOURCE_NAME = 33
INVALID_DIRECTIVE_ACTION = 34
TEST_LOOP_INTENT_FILTER_NOT_FOUND = 12
SCENARIO_LABEL_NOT_DECLARED = 13
SCENARIO_LABEL_MALFORMED = 14
SCENARIO_NOT_DECLARED = 15
DEVICE_ADMIN_RECEIVER = 17
MALFORMED_XC_TEST_ZIP = 11
BUILT_FOR_IOS_SIMULATOR = 24
NO_TESTS_IN_XC_TEST_ZIP = 25
USE_DESTINATION_ARTIFACTS = 26
TEST_NOT_APP_HOSTED = 28
PLIST_CANNOT_BE_PARSED = 30
TEST_ONLY_APK = 21
MALFORMED_IPA = 22
MISSING_URL_SCHEME = 35
MALFORMED_APP_BUNDLE = 36
NO_CODE_APK = 23
INVALID_INPUT_APK = 27
INVALID_APK_PREVIEW_SDK = 29
class TestState(proto.Enum):
r"""The state (i.e., progress) of a test execution or matrix."""
TEST_STATE_UNSPECIFIED = 0
VALIDATING = 8
PENDING = 1
RUNNING = 2
FINISHED = 3
ERROR = 4
UNSUPPORTED_ENVIRONMENT = 5
INCOMPATIBLE_ENVIRONMENT = 9
INCOMPATIBLE_ARCHITECTURE = 10
CANCELLED = 6
INVALID = 7
class OutcomeSummary(proto.Enum):
r"""Outcome summary for a finished test matrix."""
OUTCOME_SUMMARY_UNSPECIFIED = 0
SUCCESS = 1
FAILURE = 2
INCONCLUSIVE = 3
SKIPPED = 4
class TestMatrix(proto.Message):
r"""TestMatrix captures all details about a test. It contains the
environment configuration, test specification, test executions
and overall state and outcome.
Attributes:
test_matrix_id (str):
Output only. Unique id set by the service.
project_id (str):
The cloud project that owns the test matrix.
client_info (google.devtools.testing_v1.types.ClientInfo):
Information about the client which invoked
the test.
test_specification (google.devtools.testing_v1.types.TestSpecification):
Required. How to run the test.
environment_matrix (google.devtools.testing_v1.types.EnvironmentMatrix):
Required. The devices the tests are being
executed on.
test_executions (Sequence[google.devtools.testing_v1.types.TestExecution]):
Output only. The list of test executions that
the service creates for this matrix.
result_storage (google.devtools.testing_v1.types.ResultStorage):
Required. Where the results for the matrix
are written.
state (google.devtools.testing_v1.types.TestState):
Output only. Indicates the current progress
of the test matrix.
timestamp (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time this test matrix was
initially created.
invalid_matrix_details (google.devtools.testing_v1.types.InvalidMatrixDetails):
Output only. Describes why the matrix is
considered invalid. Only useful for matrices in
the INVALID state.
flaky_test_attempts (int):
The number of times a TestExecution should be
re-attempted if one or more of its test cases
fail for any reason. The maximum number of
reruns allowed is 10.
Default is 0, which implies no reruns.
outcome_summary (google.devtools.testing_v1.types.OutcomeSummary):
Output Only. The overall outcome of the test.
Only set when the test matrix state is FINISHED.
fail_fast (bool):
If true, only a single attempt at most will
be made to run each execution/shard in the
matrix. Flaky test attempts are not affected.
Normally, 2 or more attempts are made if a
potential infrastructure issue is detected.
This feature is for latency sensitive workloads.
The incidence of execution failures may be
significantly greater for fail-fast matrices and
support is more limited because of that
expectation.
"""
test_matrix_id = proto.Field(
proto.STRING,
number=1,
)
project_id = proto.Field(
proto.STRING,
number=7,
)
client_info = proto.Field(
proto.MESSAGE,
number=10,
message='ClientInfo',
)
test_specification = proto.Field(
proto.MESSAGE,
number=3,
message='TestSpecification',
)
environment_matrix = proto.Field(
proto.MESSAGE,
number=4,
message='EnvironmentMatrix',
)
test_executions = proto.RepeatedField(
proto.MESSAGE,
number=5,
message='TestExecution',
)
result_storage = proto.Field(
proto.MESSAGE,
number=6,
message='ResultStorage',
)
state = proto.Field(
proto.ENUM,
number=8,
enum='TestState',
)
timestamp = proto.Field(
proto.MESSAGE,
number=9,
message=timestamp_pb2.Timestamp,
)
invalid_matrix_details = proto.Field(
proto.ENUM,
number=11,
enum='InvalidMatrixDetails',
)
flaky_test_attempts = proto.Field(
proto.INT32,
number=13,
)
outcome_summary = proto.Field(
proto.ENUM,
number=14,
enum='OutcomeSummary',
)
fail_fast = proto.Field(
proto.BOOL,
number=17,
)
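    # Illustrative construction of a minimal matrix (all values hypothetical):
    #
    #   matrix = TestMatrix(
    #       project_id="my-gcp-project",
    #       test_specification=TestSpecification(
    #           android_robo_test=AndroidRoboTest(
    #               app_apk=FileReference(gcs_path="gs://my-bucket/app.apk"))),
    #       environment_matrix=EnvironmentMatrix(
    #           android_device_list=AndroidDeviceList(android_devices=[
    #               AndroidDevice(android_model_id="Pixel2",
    #                             android_version_id="28",
    #                             locale="en", orientation="portrait")])),
    #       result_storage=ResultStorage(
    #           google_cloud_storage=GoogleCloudStorage(
    #               gcs_path="gs://my-bucket/results")))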
class TestExecution(proto.Message):
r"""A single test executed in a single environment.
Attributes:
id (str):
Output only. Unique id set by the service.
matrix_id (str):
Output only. Id of the containing TestMatrix.
project_id (str):
Output only. The cloud project that owns the
test execution.
test_specification (google.devtools.testing_v1.types.TestSpecification):
Output only. How to run the test.
shard (google.devtools.testing_v1.types.Shard):
Output only. Details about the shard.
environment (google.devtools.testing_v1.types.Environment):
Output only. How the host machine(s) are
configured.
state (google.devtools.testing_v1.types.TestState):
Output only. Indicates the current progress
of the test execution (e.g., FINISHED).
tool_results_step (google.devtools.testing_v1.types.ToolResultsStep):
Output only. Where the results for this
execution are written.
timestamp (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time this test execution was
initially created.
test_details (google.devtools.testing_v1.types.TestDetails):
Output only. Additional details about the
running test.
"""
id = proto.Field(
proto.STRING,
number=1,
)
matrix_id = proto.Field(
proto.STRING,
number=9,
)
project_id = proto.Field(
proto.STRING,
number=10,
)
test_specification = proto.Field(
proto.MESSAGE,
number=3,
message='TestSpecification',
)
shard = proto.Field(
proto.MESSAGE,
number=12,
message='Shard',
)
environment = proto.Field(
proto.MESSAGE,
number=4,
message='Environment',
)
state = proto.Field(
proto.ENUM,
number=5,
enum='TestState',
)
tool_results_step = proto.Field(
proto.MESSAGE,
number=11,
message='ToolResultsStep',
)
timestamp = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
test_details = proto.Field(
proto.MESSAGE,
number=8,
message='TestDetails',
)
class TestSpecification(proto.Message):
r"""A description of how to run the test.
Attributes:
test_timeout (google.protobuf.duration_pb2.Duration):
Max time a test execution is allowed to run
before it is automatically cancelled.
The default value is 5 min.
test_setup (google.devtools.testing_v1.types.TestSetup):
Test setup requirements for Android e.g.
files to install, bootstrap scripts.
ios_test_setup (google.devtools.testing_v1.types.IosTestSetup):
Test setup requirements for iOS.
android_instrumentation_test (google.devtools.testing_v1.types.AndroidInstrumentationTest):
An Android instrumentation test.
android_robo_test (google.devtools.testing_v1.types.AndroidRoboTest):
An Android robo test.
android_test_loop (google.devtools.testing_v1.types.AndroidTestLoop):
An Android Application with a Test Loop.
ios_xc_test (google.devtools.testing_v1.types.IosXcTest):
An iOS XCTest, via an .xctestrun file.
ios_test_loop (google.devtools.testing_v1.types.IosTestLoop):
An iOS application with a test loop.
disable_video_recording (bool):
Disables video recording. May reduce test
latency.
disable_performance_metrics (bool):
Disables performance metrics recording. May
reduce test latency.
"""
test_timeout = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
test_setup = proto.Field(
proto.MESSAGE,
number=6,
oneof='setup',
message='TestSetup',
)
ios_test_setup = proto.Field(
proto.MESSAGE,
number=14,
oneof='setup',
message='IosTestSetup',
)
android_instrumentation_test = proto.Field(
proto.MESSAGE,
number=2,
oneof='test',
message='AndroidInstrumentationTest',
)
android_robo_test = proto.Field(
proto.MESSAGE,
number=3,
oneof='test',
message='AndroidRoboTest',
)
android_test_loop = proto.Field(
proto.MESSAGE,
number=9,
oneof='test',
message='AndroidTestLoop',
)
ios_xc_test = proto.Field(
proto.MESSAGE,
number=13,
oneof='test',
message='IosXcTest',
)
ios_test_loop = proto.Field(
proto.MESSAGE,
number=15,
oneof='test',
message='IosTestLoop',
)
disable_video_recording = proto.Field(
proto.BOOL,
number=10,
)
disable_performance_metrics = proto.Field(
proto.BOOL,
number=11,
)
class SystraceSetup(proto.Message):
r"""
Attributes:
duration_seconds (int):
Systrace duration in seconds.
Should be between 1 and 30 seconds. 0 disables
systrace.
"""
duration_seconds = proto.Field(
proto.INT32,
number=1,
)
class TestSetup(proto.Message):
r"""A description of how to set up the Android device prior to
running the test.
Attributes:
files_to_push (Sequence[google.devtools.testing_v1.types.DeviceFile]):
List of files to push to the device before
starting the test.
directories_to_pull (Sequence[str]):
List of directories on the device to upload to GCS at the
end of the test; they must be absolute paths under /sdcard,
/storage or /data/local/tmp. Path names are restricted to
characters a-z A-Z 0-9 \_ - . + and /
Note: The paths /sdcard and /data will be made available and
treated as implicit path substitutions. E.g. if /sdcard on a
particular device does not map to external storage, the
system will replace it with the external storage path prefix
for that device.
additional_apks (Sequence[google.devtools.testing_v1.types.Apk]):
APKs to install in addition to those being
directly tested. Currently capped at 100.
account (google.devtools.testing_v1.types.Account):
The device will be logged in on this account
for the duration of the test.
network_profile (str):
The network traffic profile used for running the test.
Available network profiles can be queried by using the
NETWORK_CONFIGURATION environment type when calling
TestEnvironmentDiscoveryService.GetTestEnvironmentCatalog.
environment_variables (Sequence[google.devtools.testing_v1.types.EnvironmentVariable]):
Environment variables to set for the test
(only applicable for instrumentation tests).
systrace (google.devtools.testing_v1.types.SystraceSetup):
Systrace configuration for the run.
If set a systrace will be taken, starting on
test start and lasting for the configured
duration. The systrace file thus obtained is put
in the results bucket together with the other
artifacts from the run.
dont_autogrant_permissions (bool):
Whether to prevent all runtime permissions to
be granted at app install
"""
files_to_push = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='DeviceFile',
)
directories_to_pull = proto.RepeatedField(
proto.STRING,
number=2,
)
additional_apks = proto.RepeatedField(
proto.MESSAGE,
number=3,
message='Apk',
)
account = proto.Field(
proto.MESSAGE,
number=4,
message='Account',
)
network_profile = proto.Field(
proto.STRING,
number=5,
)
environment_variables = proto.RepeatedField(
proto.MESSAGE,
number=6,
message='EnvironmentVariable',
)
systrace = proto.Field(
proto.MESSAGE,
number=9,
message='SystraceSetup',
)
dont_autogrant_permissions = proto.Field(
proto.BOOL,
number=23,
)
class IosTestSetup(proto.Message):
r"""A description of how to set up an iOS device prior to running
the test.
Attributes:
network_profile (str):
The network traffic profile used for running the test.
Available network profiles can be queried by using the
NETWORK_CONFIGURATION environment type when calling
TestEnvironmentDiscoveryService.GetTestEnvironmentCatalog.
additional_ipas (Sequence[google.devtools.testing_v1.types.FileReference]):
iOS apps to install in addition to those
being directly tested.
push_files (Sequence[google.devtools.testing_v1.types.IosDeviceFile]):
List of files to push to the device before
starting the test.
pull_directories (Sequence[google.devtools.testing_v1.types.IosDeviceFile]):
List of directories on the device to upload
to Cloud Storage at the end of the test.
Directories should either be in a shared
directory (e.g. /private/var/mobile/Media) or
within an accessible directory inside the app's
filesystem (e.g. /Documents) by specifying the
bundle id.
"""
network_profile = proto.Field(
proto.STRING,
number=1,
)
additional_ipas = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='FileReference',
)
push_files = proto.RepeatedField(
proto.MESSAGE,
number=3,
message='IosDeviceFile',
)
pull_directories = proto.RepeatedField(
proto.MESSAGE,
number=4,
message='IosDeviceFile',
)
class EnvironmentVariable(proto.Message):
r"""A key-value pair passed as an environment variable to the
test.
Attributes:
key (str):
Key for the environment variable.
value (str):
Value for the environment variable.
"""
key = proto.Field(
proto.STRING,
number=1,
)
value = proto.Field(
proto.STRING,
number=2,
)
class Account(proto.Message):
r"""Identifies an account and how to log into it.
Attributes:
google_auto (google.devtools.testing_v1.types.GoogleAuto):
An automatic google login account.
"""
google_auto = proto.Field(
proto.MESSAGE,
number=1,
oneof='account_type',
message='GoogleAuto',
)
class GoogleAuto(proto.Message):
r"""Enables automatic Google account login.
If set, the service automatically generates a Google test
account and adds it to the device, before executing the test.
Note that test accounts might be reused.
Many applications show their full set of functionalities when an
account is present on the device. Logging into the device with
these generated accounts allows testing more functionalities.
"""
class Apk(proto.Message):
r"""An Android package file to install.
Attributes:
location (google.devtools.testing_v1.types.FileReference):
The path to an APK to be installed on the
device before the test begins.
package_name (str):
The java package for the APK to be installed.
Value is determined by examining the
application's manifest.
"""
location = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
package_name = proto.Field(
proto.STRING,
number=2,
)
class AppBundle(proto.Message):
r"""An Android App Bundle file format, containing a
BundleConfig.pb file, a base module directory, zero or more
dynamic feature module directories. <p>See
https://developer.android.com/guide/app-bundle/build for
guidance on building App Bundles.
Attributes:
bundle_location (google.devtools.testing_v1.types.FileReference):
.aab file representing the app bundle under
test.
"""
bundle_location = proto.Field(
proto.MESSAGE,
number=1,
oneof='bundle',
message='FileReference',
)
class DeviceFile(proto.Message):
r"""A single device file description.
Attributes:
obb_file (google.devtools.testing_v1.types.ObbFile):
A reference to an opaque binary blob file.
regular_file (google.devtools.testing_v1.types.RegularFile):
A reference to a regular file.
"""
obb_file = proto.Field(
proto.MESSAGE,
number=1,
oneof='device_file',
message='ObbFile',
)
regular_file = proto.Field(
proto.MESSAGE,
number=2,
oneof='device_file',
message='RegularFile',
)
class ObbFile(proto.Message):
r"""An opaque binary blob file to install on the device before
the test starts.
Attributes:
obb_file_name (str):
Required. OBB file name which must conform to the format as
specified by Android e.g.
[main|patch].0300110.com.example.android.obb which will be
installed into <shared-storage>/Android/obb/<package-name>/
on the device.
obb (google.devtools.testing_v1.types.FileReference):
Required. Opaque Binary Blob (OBB) file(s) to
install on the device.
"""
obb_file_name = proto.Field(
proto.STRING,
number=1,
)
obb = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
class RegularFile(proto.Message):
r"""A file or directory to install on the device before the test
starts.
Attributes:
content (google.devtools.testing_v1.types.FileReference):
Required. The source file.
device_path (str):
Required. Where to put the content on the device. Must be an
absolute, allowlisted path. If the file exists, it will be
replaced. The following device-side directories and any of
their subdirectories are allowlisted:
.. raw:: html
<p>${EXTERNAL_STORAGE}, /sdcard, or /storage</p>
<p>${ANDROID_DATA}/local/tmp, or /data/local/tmp</p>
<p>Specifying a path outside of these directory trees is invalid.
.. raw:: html
<p> The paths /sdcard and /data will be made available and treated as
implicit path substitutions. E.g. if /sdcard on a particular device does
not map to external storage, the system will replace it with the external
storage path prefix for that device and copy the file there.
.. raw:: html
<p> It is strongly advised to use the <a href=
"http://developer.android.com/reference/android/os/Environment.html">
Environment API</a> in app and test code to access files on the device in a
portable way.
"""
content = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
device_path = proto.Field(
proto.STRING,
number=2,
)
class IosDeviceFile(proto.Message):
r"""A file or directory to install on the device before the test
starts.
Attributes:
content (google.devtools.testing_v1.types.FileReference):
The source file
bundle_id (str):
The bundle id of the app where this file
lives.
iOS apps sandbox their own filesystem, so app
files must specify which app installed on the
device.
device_path (str):
Location of the file on the device, inside
the app's sandboxed filesystem
"""
content = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
bundle_id = proto.Field(
proto.STRING,
number=2,
)
device_path = proto.Field(
proto.STRING,
number=3,
)
class AndroidTestLoop(proto.Message):
r"""A test of an Android Application with a Test Loop.
The intent \<intent-name\> will be implicitly added, since Games
is the only user of this api, for the time being.
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
app_package_id (str):
The java package for the application under
test. The default is determined by examining the
application's manifest.
scenarios (Sequence[int]):
The list of scenarios that should be run
during the test. The default is all test loops,
derived from the application's manifest.
scenario_labels (Sequence[str]):
The list of scenario labels that should be run during the
test. The scenario labels should map to labels defined in
the application's manifest. For example, player_experience
and com.google.test.loops.player_experience add all of the
loops labeled in the manifest with the
com.google.test.loops.player_experience name to the
execution. Scenarios can also be specified in the scenarios
field.
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=5,
oneof='app_under_test',
message='AppBundle',
)
app_package_id = proto.Field(
proto.STRING,
number=2,
)
scenarios = proto.RepeatedField(
proto.INT32,
number=3,
)
scenario_labels = proto.RepeatedField(
proto.STRING,
number=4,
)
class IosXcTest(proto.Message):
r"""A test of an iOS application that uses the XCTest framework.
Xcode supports the option to "build for testing", which
generates an .xctestrun file that contains a test specification
(arguments, test methods, etc). This test type accepts a zip
file containing the .xctestrun file and the corresponding
contents of the Build/Products directory that contains all the
binaries needed to run the tests.
Attributes:
tests_zip (google.devtools.testing_v1.types.FileReference):
Required. The .zip containing the .xctestrun
file and the contents of the
DerivedData/Build/Products directory. The
.xctestrun file in this zip is ignored if the
xctestrun field is specified.
xctestrun (google.devtools.testing_v1.types.FileReference):
An .xctestrun file that will override the
.xctestrun file in the tests zip. Because the
.xctestrun file contains environment variables
along with test methods to run and/or ignore,
this can be useful for sharding tests. Default
is taken from the tests zip.
xcode_version (str):
The Xcode version that should be used for the
test. Use the TestEnvironmentDiscoveryService to
get supported options. Defaults to the latest
Xcode version Firebase Test Lab supports.
app_bundle_id (str):
Output only. The bundle id for the
application under test.
test_special_entitlements (bool):
The option to test special app entitlements.
Setting this would re-sign the app having
special entitlements with an explicit
application-identifier. Currently supports
testing aps-environment entitlement.
"""
tests_zip = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
xctestrun = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
xcode_version = proto.Field(
proto.STRING,
number=3,
)
app_bundle_id = proto.Field(
proto.STRING,
number=4,
)
test_special_entitlements = proto.Field(
proto.BOOL,
number=6,
)
class IosTestLoop(proto.Message):
r"""A test of an iOS application that implements one or more game
loop scenarios. This test type accepts an archived application
(.ipa file) and a list of integer scenarios that will be
executed on the app sequentially.
Attributes:
app_ipa (google.devtools.testing_v1.types.FileReference):
Required. The .ipa of the application to
test.
scenarios (Sequence[int]):
The list of scenarios that should be run
during the test. Defaults to the single scenario
0 if unspecified.
app_bundle_id (str):
Output only. The bundle id for the
application under test.
"""
app_ipa = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
scenarios = proto.RepeatedField(
proto.INT32,
number=2,
)
app_bundle_id = proto.Field(
proto.STRING,
number=3,
)
class AndroidInstrumentationTest(proto.Message):
r"""A test of an Android application that can control an Android
component independently of its normal lifecycle. Android
instrumentation tests run an application APK and test APK inside the
same process on a virtual or physical AndroidDevice. They also
specify a test runner class, such as com.google.GoogleTestRunner,
which can vary on the specific instrumentation framework chosen.
See http://developer.android.com/tools/testing/testing_android.html
for more information on types of Android tests.
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
test_apk (google.devtools.testing_v1.types.FileReference):
Required. The APK containing the test code to
be executed.
app_package_id (str):
The java package for the application under
test. The default value is determined by
examining the application's manifest.
test_package_id (str):
The java package for the test to be executed.
The default value is determined by examining the
application's manifest.
test_runner_class (str):
The InstrumentationTestRunner class.
The default value is determined by examining the
application's manifest.
test_targets (Sequence[str]):
Each target must be fully qualified with the package name or
class name, in one of these formats:
- "package package_name"
- "class package_name.class_name"
- "class package_name.class_name#method_name"
If empty, all targets in the module will be run.
orchestrator_option (google.devtools.testing_v1.types.OrchestratorOption):
The option of whether running each test within its own
invocation of instrumentation with Android Test Orchestrator
or not. \*\* Orchestrator is only compatible with
AndroidJUnitRunner version 1.0 or higher! \*\* Orchestrator
offers the following benefits:
- No shared state
- Crashes are isolated
- Logs are scoped per test
See
https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator
for more information about Android Test Orchestrator.
If not set, the test will be run without the orchestrator.
sharding_option (google.devtools.testing_v1.types.ShardingOption):
The option to run tests in multiple shards in
parallel.
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=8,
oneof='app_under_test',
message='AppBundle',
)
test_apk = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
app_package_id = proto.Field(
proto.STRING,
number=3,
)
test_package_id = proto.Field(
proto.STRING,
number=4,
)
test_runner_class = proto.Field(
proto.STRING,
number=5,
)
test_targets = proto.RepeatedField(
proto.STRING,
number=6,
)
orchestrator_option = proto.Field(
proto.ENUM,
number=7,
enum='OrchestratorOption',
)
sharding_option = proto.Field(
proto.MESSAGE,
number=9,
message='ShardingOption',
)
class AndroidRoboTest(proto.Message):
r"""A test of an android application that explores the
application on a virtual or physical Android Device, finding
culprits and crashes as it goes. Next tag: 30
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
app_package_id (str):
The java package for the application under
test. The default value is determined by
examining the application's manifest.
app_initial_activity (str):
The initial activity that should be used to
start the app.
max_depth (int):
The max depth of the traversal stack Robo can
explore. Needs to be at least 2 to make Robo
explore the app beyond the first activity.
Default is 50.
max_steps (int):
The max number of steps Robo can execute.
Default is no limit.
robo_directives (Sequence[google.devtools.testing_v1.types.RoboDirective]):
A set of directives Robo should apply during
the crawl. This allows users to customize the
crawl. For example, the username and password
for a test account can be provided.
robo_script (google.devtools.testing_v1.types.FileReference):
A JSON file with a sequence of actions Robo
should perform as a prologue for the crawl.
starting_intents (Sequence[google.devtools.testing_v1.types.RoboStartingIntent]):
The intents used to launch the app for the
crawl. If none are provided, then the main
launcher activity is launched. If some are
provided, then only those provided are launched
(the main launcher activity must be provided
explicitly).
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=16,
oneof='app_under_test',
message='AppBundle',
)
app_package_id = proto.Field(
proto.STRING,
number=2,
)
app_initial_activity = proto.Field(
proto.STRING,
number=3,
)
max_depth = proto.Field(
proto.INT32,
number=7,
)
max_steps = proto.Field(
proto.INT32,
number=8,
)
robo_directives = proto.RepeatedField(
proto.MESSAGE,
number=11,
message='RoboDirective',
)
robo_script = proto.Field(
proto.MESSAGE,
number=13,
message='FileReference',
)
starting_intents = proto.RepeatedField(
proto.MESSAGE,
number=15,
message='RoboStartingIntent',
)
class RoboDirective(proto.Message):
r"""Directs Robo to interact with a specific UI element if it is
encountered during the crawl. Currently, Robo can perform text
entry or element click.
Attributes:
resource_name (str):
Required. The android resource name of the
target UI element. For example,
in Java: R.string.foo
in xml: @string/foo
Only the "foo" part is needed.
            Reference doc:
            https://developer.android.com/guide/topics/resources/accessing-resources.html
input_text (str):
The text that Robo is directed to set. If left empty, the
directive will be treated as a CLICK on the element matching
the resource_name.
action_type (google.devtools.testing_v1.types.RoboActionType):
Required. The type of action that Robo should
perform on the specified element.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
input_text = proto.Field(
proto.STRING,
number=2,
)
action_type = proto.Field(
proto.ENUM,
number=3,
enum='RoboActionType',
)
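    # Illustrative directive (resource name and text are hypothetical):
    #   RoboDirective(resource_name="username_field",
    #                 input_text="test_user",
    #                 action_type=RoboActionType.ENTER_TEXT)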
class RoboStartingIntent(proto.Message):
r"""Message for specifying the start activities to crawl.
Attributes:
launcher_activity (google.devtools.testing_v1.types.LauncherActivityIntent):
An intent that starts the main launcher
activity.
start_activity (google.devtools.testing_v1.types.StartActivityIntent):
An intent that starts an activity with
specific details.
timeout (google.protobuf.duration_pb2.Duration):
Timeout in seconds for each intent.
"""
launcher_activity = proto.Field(
proto.MESSAGE,
number=1,
oneof='starting_intent',
message='LauncherActivityIntent',
)
start_activity = proto.Field(
proto.MESSAGE,
number=2,
oneof='starting_intent',
message='StartActivityIntent',
)
timeout = proto.Field(
proto.MESSAGE,
number=3,
message=duration_pb2.Duration,
)
class LauncherActivityIntent(proto.Message):
r"""Specifies an intent that starts the main launcher activity.
"""
class StartActivityIntent(proto.Message):
r"""A starting intent specified by an action, uri, and
categories.
Attributes:
action (str):
Action name. Required for START_ACTIVITY.
uri (str):
URI for the action.
categories (Sequence[str]):
Intent categories to set on the intent.
"""
action = proto.Field(
proto.STRING,
number=2,
)
uri = proto.Field(
proto.STRING,
number=3,
)
categories = proto.RepeatedField(
proto.STRING,
number=4,
)
class EnvironmentMatrix(proto.Message):
r"""The matrix of environments in which the test is to be
executed.
Attributes:
android_matrix (google.devtools.testing_v1.types.AndroidMatrix):
A matrix of Android devices.
android_device_list (google.devtools.testing_v1.types.AndroidDeviceList):
A list of Android devices; the test will be
run only on the specified devices.
ios_device_list (google.devtools.testing_v1.types.IosDeviceList):
A list of iOS devices.
"""
android_matrix = proto.Field(
proto.MESSAGE,
number=1,
oneof='environment_matrix',
message='AndroidMatrix',
)
android_device_list = proto.Field(
proto.MESSAGE,
number=2,
oneof='environment_matrix',
message='AndroidDeviceList',
)
ios_device_list = proto.Field(
proto.MESSAGE,
number=3,
oneof='environment_matrix',
message='IosDeviceList',
)
class AndroidDeviceList(proto.Message):
r"""A list of Android device configurations in which the test is
to be executed.
Attributes:
android_devices (Sequence[google.devtools.testing_v1.types.AndroidDevice]):
Required. A list of Android devices.
"""
android_devices = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='AndroidDevice',
)
class IosDeviceList(proto.Message):
r"""A list of iOS device configurations in which the test is to
be executed.
Attributes:
ios_devices (Sequence[google.devtools.testing_v1.types.IosDevice]):
Required. A list of iOS devices.
"""
ios_devices = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='IosDevice',
)
class AndroidMatrix(proto.Message):
r"""A set of Android device configuration permutations is defined
    by the cross-product of the given axes. Internally, the
given AndroidMatrix will be expanded into a set of
AndroidDevices.
Only supported permutations will be instantiated. Invalid
permutations (e.g., incompatible models/versions) are ignored.
Attributes:
android_model_ids (Sequence[str]):
Required. The ids of the set of Android
device to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
android_version_ids (Sequence[str]):
Required. The ids of the set of Android OS
version to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
locales (Sequence[str]):
Required. The set of locales the test device
will enable for testing. Use the
TestEnvironmentDiscoveryService to get supported
options.
orientations (Sequence[str]):
Required. The set of orientations to test
with. Use the TestEnvironmentDiscoveryService to
get supported options.
"""
android_model_ids = proto.RepeatedField(
proto.STRING,
number=1,
)
android_version_ids = proto.RepeatedField(
proto.STRING,
number=2,
)
locales = proto.RepeatedField(
proto.STRING,
number=3,
)
orientations = proto.RepeatedField(
proto.STRING,
number=4,
)
class ClientInfo(proto.Message):
r"""Information about the client which invoked the test.
Attributes:
name (str):
Required. Client name, such as gcloud.
client_info_details (Sequence[google.devtools.testing_v1.types.ClientInfoDetail]):
The list of detailed information about
client.
"""
name = proto.Field(
proto.STRING,
number=1,
)
client_info_details = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='ClientInfoDetail',
)
class ClientInfoDetail(proto.Message):
r"""Key-value pair of detailed information about the client which
invoked the test. Examples: {'Version', '1.0'}, {'Release
Track', 'BETA'}.
Attributes:
key (str):
Required. The key of detailed client
information.
value (str):
Required. The value of detailed client
information.
"""
key = proto.Field(
proto.STRING,
number=1,
)
value = proto.Field(
proto.STRING,
number=2,
)
class ResultStorage(proto.Message):
r"""Locations where the results of running the test are stored.
Attributes:
google_cloud_storage (google.devtools.testing_v1.types.GoogleCloudStorage):
Required.
tool_results_history (google.devtools.testing_v1.types.ToolResultsHistory):
The tool results history that contains the
tool results execution that results are written
to.
If not provided, the service will choose an
appropriate value.
tool_results_execution (google.devtools.testing_v1.types.ToolResultsExecution):
Output only. The tool results execution that
results are written to.
results_url (str):
Output only. URL to the results in the
Firebase Web Console.
"""
google_cloud_storage = proto.Field(
proto.MESSAGE,
number=1,
message='GoogleCloudStorage',
)
tool_results_history = proto.Field(
proto.MESSAGE,
number=5,
message='ToolResultsHistory',
)
tool_results_execution = proto.Field(
proto.MESSAGE,
number=6,
message='ToolResultsExecution',
)
results_url = proto.Field(
proto.STRING,
number=7,
)
class ToolResultsHistory(proto.Message):
r"""Represents a tool results history resource.
Attributes:
project_id (str):
Required. The cloud project that owns the
tool results history.
history_id (str):
Required. A tool results history ID.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
history_id = proto.Field(
proto.STRING,
number=2,
)
class ToolResultsExecution(proto.Message):
r"""Represents a tool results execution resource.
This has the results of a TestMatrix.
Attributes:
project_id (str):
Output only. The cloud project that owns the
tool results execution.
history_id (str):
Output only. A tool results history ID.
execution_id (str):
Output only. A tool results execution ID.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
history_id = proto.Field(
proto.STRING,
number=2,
)
execution_id = proto.Field(
proto.STRING,
number=3,
)
class ToolResultsStep(proto.Message):
r"""Represents a tool results step resource.
This has the results of a TestExecution.
Attributes:
project_id (str):
Output only. The cloud project that owns the
tool results step.
history_id (str):
Output only. A tool results history ID.
execution_id (str):
Output only. A tool results execution ID.
step_id (str):
Output only. A tool results step ID.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
history_id = proto.Field(
proto.STRING,
number=2,
)
execution_id = proto.Field(
proto.STRING,
number=3,
)
step_id = proto.Field(
proto.STRING,
number=4,
)
class GoogleCloudStorage(proto.Message):
r"""A storage location within Google cloud storage (GCS).
Attributes:
gcs_path (str):
Required. The path to a directory in GCS that
will eventually contain the results for this
test. The requesting user must have write access
on the bucket in the supplied path.
"""
gcs_path = proto.Field(
proto.STRING,
number=1,
)
class FileReference(proto.Message):
r"""A reference to a file, used for user inputs.
Attributes:
gcs_path (str):
A path to a file in Google Cloud Storage.
            Example: gs://build-app-1414623860166/app%40debug-unaligned.apk
            These paths are expected to be URL-encoded (percent encoding).
"""
gcs_path = proto.Field(
proto.STRING,
number=1,
oneof='file',
)
class Environment(proto.Message):
r"""The environment in which the test is run.
Attributes:
android_device (google.devtools.testing_v1.types.AndroidDevice):
An Android device which must be used with an
Android test.
ios_device (google.devtools.testing_v1.types.IosDevice):
An iOS device which must be used with an iOS
test.
"""
android_device = proto.Field(
proto.MESSAGE,
number=1,
oneof='environment',
message='AndroidDevice',
)
ios_device = proto.Field(
proto.MESSAGE,
number=2,
oneof='environment',
message='IosDevice',
)
class AndroidDevice(proto.Message):
r"""A single Android device.
Attributes:
android_model_id (str):
Required. The id of the Android device to be
used. Use the TestEnvironmentDiscoveryService to
get supported options.
android_version_id (str):
Required. The id of the Android OS version to
be used. Use the TestEnvironmentDiscoveryService
to get supported options.
locale (str):
Required. The locale the test device used for
testing. Use the TestEnvironmentDiscoveryService
to get supported options.
orientation (str):
Required. How the device is oriented during
the test. Use the
TestEnvironmentDiscoveryService to get supported
options.
"""
android_model_id = proto.Field(
proto.STRING,
number=1,
)
android_version_id = proto.Field(
proto.STRING,
number=2,
)
locale = proto.Field(
proto.STRING,
number=3,
)
orientation = proto.Field(
proto.STRING,
number=4,
)
class IosDevice(proto.Message):
r"""A single iOS device.
Attributes:
ios_model_id (str):
Required. The id of the iOS device to be
used. Use the TestEnvironmentDiscoveryService to
get supported options.
ios_version_id (str):
Required. The id of the iOS major software
version to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
locale (str):
Required. The locale the test device used for
testing. Use the TestEnvironmentDiscoveryService
to get supported options.
orientation (str):
Required. How the device is oriented during
the test. Use the
TestEnvironmentDiscoveryService to get supported
options.
"""
ios_model_id = proto.Field(
proto.STRING,
number=1,
)
ios_version_id = proto.Field(
proto.STRING,
number=2,
)
locale = proto.Field(
proto.STRING,
number=3,
)
orientation = proto.Field(
proto.STRING,
number=4,
)
class TestDetails(proto.Message):
r"""Additional details about the progress of the running test.
Attributes:
progress_messages (Sequence[str]):
Output only. Human-readable, detailed descriptions of the
test's progress. For example: "Provisioning a device",
"Starting Test".
During the course of execution new data may be appended to
the end of progress_messages.
error_message (str):
Output only. If the TestState is ERROR, then
this string will contain human-readable details
about the error.
"""
progress_messages = proto.RepeatedField(
proto.STRING,
number=3,
)
error_message = proto.Field(
proto.STRING,
number=4,
)
class InvalidRequestDetail(proto.Message):
r"""Details behind an invalid request.
Attributes:
reason (google.devtools.testing_v1.types.InvalidRequestDetail.Reason):
The reason behind the error.
"""
class Reason(proto.Enum):
r"""Possible invalid request reasons."""
REASON_UNSPECIFIED = 0
REQUEST_INVALID = 1
RESOURCE_TOO_BIG = 2
RESOURCE_NOT_FOUND = 3
UNSUPPORTED = 4
NOT_IMPLEMENTED = 5
reason = proto.Field(
proto.ENUM,
number=1,
enum=Reason,
)
class ShardingOption(proto.Message):
r"""Options for enabling sharding.
Attributes:
uniform_sharding (google.devtools.testing_v1.types.UniformSharding):
Uniformly shards test cases given a total
number of shards.
manual_sharding (google.devtools.testing_v1.types.ManualSharding):
Shards test cases into the specified groups
of packages, classes, and/or methods.
"""
uniform_sharding = proto.Field(
proto.MESSAGE,
number=1,
oneof='option',
message='UniformSharding',
)
manual_sharding = proto.Field(
proto.MESSAGE,
number=2,
oneof='option',
message='ManualSharding',
)
class UniformSharding(proto.Message):
r"""Uniformly shards test cases given a total number of shards.
For Instrumentation test, it will be translated to "-e numShard" "-e
shardIndex" AndroidJUnitRunner arguments. With uniform sharding
enabled, specifying these sharding arguments via
environment_variables is invalid.
Attributes:
num_shards (int):
Required. Total number of shards. When any
physical devices are selected, the number must
be >= 1 and <= 50. When no physical devices are
selected, the number must be >= 1 and <= 500.
"""
num_shards = proto.Field(
proto.INT32,
number=1,
)
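    # e.g. ShardingOption(uniform_sharding=UniformSharding(num_shards=4))
    # splits the test run across four shards of roughly equal size.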
class ManualSharding(proto.Message):
r"""Shards test cases into the specified groups of packages, classes,
and/or methods.
With manual sharding enabled, specifying test targets via
environment_variables or in InstrumentationTest is invalid.
Attributes:
test_targets_for_shard (Sequence[google.devtools.testing_v1.types.TestTargetsForShard]):
Required. Group of packages, classes, and/or test methods to
be run for each shard. When any physical devices are
selected, the number of test_targets_for_shard must be >= 1
and <= 50. When no physical devices are selected, the number
must be >= 1 and <= 500.
"""
test_targets_for_shard = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='TestTargetsForShard',
)
class TestTargetsForShard(proto.Message):
r"""Test targets for a shard.
Attributes:
test_targets (Sequence[str]):
Group of packages, classes, and/or test methods to be run
for each shard. The targets need to be specified in
AndroidJUnitRunner argument format. For example, "package
com.my.packages" "class com.my.package.MyClass".
The number of shard_test_targets must be greater than 0.
"""
test_targets = proto.RepeatedField(
proto.STRING,
number=1,
)
class Shard(proto.Message):
r"""Output only. Details about the shard.
Attributes:
shard_index (int):
Output only. The index of the shard among all
the shards.
num_shards (int):
Output only. The total number of shards.
test_targets_for_shard (google.devtools.testing_v1.types.TestTargetsForShard):
Output only. Test targets for each shard.
"""
shard_index = proto.Field(
proto.INT32,
number=1,
)
num_shards = proto.Field(
proto.INT32,
number=2,
)
test_targets_for_shard = proto.Field(
proto.MESSAGE,
number=3,
message='TestTargetsForShard',
)
class CreateTestMatrixRequest(proto.Message):
r"""Request to submit a matrix of tests for execution.
Attributes:
project_id (str):
The GCE project under which this job will
run.
test_matrix (google.devtools.testing_v1.types.TestMatrix):
The matrix of tests that the user wants to
run.
request_id (str):
A string id used to detect duplicated
requests. Ids are automatically scoped to a
project, so users should ensure the ID is unique
per-project. A UUID is recommended.
Optional, but strongly recommended.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
test_matrix = proto.Field(
proto.MESSAGE,
number=2,
message='TestMatrix',
)
request_id = proto.Field(
proto.STRING,
number=3,
)
class GetTestMatrixRequest(proto.Message):
r"""Request to get the Test Matrix with the given id.
Attributes:
project_id (str):
Cloud project that owns the test matrix.
test_matrix_id (str):
Unique test matrix id which was assigned by
the service.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
test_matrix_id = proto.Field(
proto.STRING,
number=2,
)
class CancelTestMatrixRequest(proto.Message):
r"""Request to stop running all of the tests in the specified
matrix.
Attributes:
project_id (str):
Cloud project that owns the test.
test_matrix_id (str):
Test matrix that will be canceled.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
test_matrix_id = proto.Field(
proto.STRING,
number=2,
)
class CancelTestMatrixResponse(proto.Message):
r"""Response containing the current state of the specified test
matrix.
Attributes:
test_state (google.devtools.testing_v1.types.TestState):
The current rolled-up state of the test
matrix. If this state is already final, then the
cancelation request will have no effect.
"""
test_state = proto.Field(
proto.ENUM,
number=1,
enum='TestState',
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 30.029472 | 108 | 0.619953 | 56,592 | 0.957596 | 0 | 0 | 0 | 0 | 0 | 0 | 39,512 | 0.668584 |
eb3c4ae70f222dd8a499b8678c9508db3922f5b5 | 1,457 | py | Python | CONTENT/Resources/guides/__UNSORTED/244_shortest_word_distance_ii/shortest.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
]
| 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | CONTENT/Resources/guides/__UNSORTED/244_shortest_word_distance_ii/shortest.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
]
| 162 | 2021-03-09T01:52:11.000Z | 2022-03-12T01:09:07.000Z | CONTENT/Resources/guides/__UNSORTED/244_shortest_word_distance_ii/shortest.py | impastasyndrome/DS-ALGO-OFFICIAL | c85ec9cf0af0009f038b7a571a7ac1fb466b7f3a | [
"Apache-2.0"
]
| 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | from collections import defaultdict
class WordDistance(object):
def __init__(self, words):
"""
initialize your data structure here.
:type words: List[str]
"""
self.indice = defaultdict(list)
self.memo = {}
self.MAXLEN = len(words)
for i, word in enumerate(words):
self.indice[word].append(i)
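        # e.g. words = ["a", "b", "a"] yields self.indice = {"a": [0, 2], "b": [1]},
        # so each word maps to the sorted list of positions where it occurs.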
def shortest(self, word1, word2):
"""
Adds a word into the data structure.
:type word1: str
:type word2: str
:rtype: int
"""
if (word1, word2) in self.memo:
return self.memo[(word1, word2)]
l1, l2 = self.indice[word1], self.indice[word2]
idx1, idx2 = 0, 0
min_distance = self.MAXLEN
        while idx1 < len(l1) and idx2 < len(l2):
            if l1[idx1] < l2[idx2]:
                min_distance = min(min_distance, l2[idx2] - l1[idx1])
                idx1 += 1
            else:
                min_distance = min(min_distance, l1[idx1] - l2[idx2])
                idx2 += 1
self.memo[(word1, word2)] = min_distance
return min_distance
# Your WordDistance object will be instantiated and called as such:
# wordDistance = WordDistance(words)
# wordDistance.shortest("word1", "word2")
# wordDistance.shortest("anotherWord1", "anotherWord2")
| 29.734694 | 67 | 0.539465 | 1,213 | 0.832533 | 0 | 0 | 0 | 0 | 0 | 0 | 420 | 0.288264 |
eb41c235a81322c2905a0154804ac4a18d5c346c | 1,060 | py | Python | src/sentimentClassification.py | MaxPowerScience/EnglishSentiment | 119eeb6e1ee9f24805fbad6650d1a9c3e305f952 | [
"Apache-2.0"
]
| null | null | null | src/sentimentClassification.py | MaxPowerScience/EnglishSentiment | 119eeb6e1ee9f24805fbad6650d1a9c3e305f952 | [
"Apache-2.0"
]
| null | null | null | src/sentimentClassification.py | MaxPowerScience/EnglishSentiment | 119eeb6e1ee9f24805fbad6650d1a9c3e305f952 | [
"Apache-2.0"
]
| null | null | null | from perceptron import train_network, create_perceptron, test_network
from preprocessingData import get_ids_matrix, separate_test_and_training_data, read_word_list
from extractRawData import get_raw_data
from lstm import create_lstm, create_lstm_with_tensorflow
def main():
all_texts, pos_texts, neg_texts = get_raw_data()
dictionary, word_vectors = read_word_list()
ids = get_ids_matrix(all_texts, dictionary)
max_seq_length = len(ids[0])
trainX, trainY, testX, testY = separate_test_and_training_data(pos_texts, neg_texts, ids)
#model = create_perceptron(max_seq_length, len(dictionary))
#model = create_lstm(max_seq_length, len(dictionary), word_vectors)
#train_network(trainX, trainY, model)
create_lstm_with_tensorflow(word_vectors, trainY)
#snapshot_name = "perceptron_20180220-152036.tfl"
#load_folder = '../models/perceptron/'
#load_path = load_folder + snapshot_name
#model.load(load_path)
#print('Model loaded')
#test_network(testX, testY, model)
if __name__ == "__main__":
main() | 37.857143 | 93 | 0.766038 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 378 | 0.356604 |
eb41c51ce9970b54d5b685bba4f5e3319c3b6398 | 33,225 | py | Python | Developer-Essentials-Capstone/Python/Includes/Capstone-Setup.py | databricks-academy/developer-essentials-capstone | 77e70b1eb5b49b5f6779495fac7d14f5fadded9d | [
"CC0-1.0"
]
| 1 | 2022-02-08T03:56:32.000Z | 2022-02-08T03:56:32.000Z | Developer-Essentials-Capstone/Python/Includes/Capstone-Setup.py | databricks-academy/developer-essentials-capstone | 77e70b1eb5b49b5f6779495fac7d14f5fadded9d | [
"CC0-1.0"
]
| null | null | null | Developer-Essentials-Capstone/Python/Includes/Capstone-Setup.py | databricks-academy/developer-essentials-capstone | 77e70b1eb5b49b5f6779495fac7d14f5fadded9d | [
"CC0-1.0"
]
| 4 | 2022-01-01T09:41:31.000Z | 2022-02-17T09:48:05.000Z | # Databricks notebook source
import builtins as BI
# Setup the capstone
import re, uuid
from pyspark.sql.types import StructType, StringType, IntegerType, TimestampType, DoubleType
from pyspark.sql.functions import col, to_date, weekofyear
from pyspark.sql import DataFrame
static_tests = None
bronze_tests = None
silver_tests = None
gold_tests = None
registration_id = None
final_passed = False
course_name = "Core Partner Enablement"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
user_db = f"dbacademy_{clean_username}_dev_ess_cap"
working_dir = f"dbfs:/user/{username}/dbacademy/dev-ess-cap"
outputPathBronzeTest = f"{working_dir}/bronze_test"
outputPathSilverTest = f"{working_dir}/silver_test"
outputPathGoldTest = f"{working_dir}/gold_test"
source_path = f"wasbs://[email protected]/developer-essentials-capstone/v01"
eventSchema = ( StructType()
.add('eventName', StringType())
.add('eventParams', StructType()
.add('game_keyword', StringType())
.add('app_name', StringType())
.add('scoreAdjustment', IntegerType())
.add('platform', StringType())
.add('app_version', StringType())
.add('device_id', StringType())
.add('client_event_time', TimestampType())
.add('amount', DoubleType())
)
)
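# Illustrative record matching eventSchema (all values are hypothetical):
# {"eventName": "purchaseEvent",
#  "eventParams": {"app_name": "dbc_mobile", "platform": "ios",
#                  "scoreAdjustment": 10, "device_id": "device-0042",
#                  "client_event_time": "2020-06-25T03:22:00.000Z",
#                  "amount": 0.99, "game_keyword": "arcade",
#                  "app_version": "1.0.0"}}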
class Key:
    """Reference (solution) datasets that the reality checks compare student results against."""
singleStreamDF = (spark
.readStream
.schema(eventSchema)
.option('streamName','mobilestreaming_test')
.option("maxFilesPerTrigger", 1)
.json(f"{source_path}/solutions/single")
)
bronzeDF = spark.read.format("delta").load(f"{source_path}/solutions/bronze")
correctLookupDF = spark.read.format("delta").load(f"{source_path}/solutions/lookup")
silverDF = spark.read.format("delta").load(f"{source_path}/solutions/silver")
goldDF = spark.read.format("delta").load(f"{source_path}/solutions/gold")
print(f"Declared the following variables:")
print(f" * user_db: {user_db}")
print(f" * working_dir: {working_dir}")
print()
print(f"Declared the following function:")
print(f" * realityCheckBronze(..)")
print(f" * realityCheckStatic(..)")
print(f" * realityCheckSilver(..)")
print(f" * realityCheckGold(..)")
print(f" * realityCheckFinal()")
# COMMAND ----------
def path_exists(path):
try:
return len(dbutils.fs.ls(path)) >= 0
except Exception:
return False
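# e.g. path_exists(f"{working_dir}/lookup_data") -> True once the datasets are installed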
def install_exercise_datasets(reinstall):
global registration_id
min_time = "1 minute"
max_time = "5 minutes"
existing = path_exists(f"{working_dir}/lookup_data") and path_exists(f"{working_dir}/event_source")
if not reinstall and existing:
print(f"Skipping install of existing datasets to\n{working_dir}/lookup_data and\n{working_dir}/event_source")
registration_id = spark.read.json(f"{working_dir}/_meta/config.json").first()["registration_id"]
return
# Remove old versions of the previously installed datasets
if existing:
print(f"Removing previously installed datasets from\n{working_dir}/lookup_data and\n{working_dir}/event_source\n")
dbutils.fs.rm(f"{working_dir}/lookup_data", True)
dbutils.fs.rm(f"{source_path}/event_source", True)
print(f"""Installing the datasets to\n{working_dir}/lookup_data\n{working_dir}/event_source""")
print(f"""\nNOTE: The datasets that we are installing are located in Washington, USA - depending on the
region that your workspace is in, this operation can take as little as {min_time} and
upwards of {max_time}, but this is a one-time operation.""")
dbutils.fs.cp(f"{source_path}/lookup_data", f"{working_dir}/lookup_data", True)
dbutils.fs.cp(f"{source_path}/event_source/part-00000-tid-6718866119967790308-cef1b03e-5fda-4259-885e-e992ca3996c3-25700-c000.json",
f"{working_dir}/event_source/file-0.json")
dbutils.fs.cp(f"{source_path}/event_source/part-00001-tid-6718866119967790308-cef1b03e-5fda-4259-885e-e992ca3996c3-25701-c000.json",
f"{working_dir}/event_source/file-1.json")
dbutils.fs.cp(f"{source_path}/event_source/part-00002-tid-6718866119967790308-cef1b03e-5fda-4259-885e-e992ca3996c3-25702-c000.json",
f"{working_dir}/event_source/file-2.json")
registration_id = str(uuid.uuid4()).replace("-","")
payload = f"""\u007b"registration_id": "{registration_id}"\u007d\n"""
dbutils.fs.put(f"{working_dir}/_meta/config.json", payload, overwrite=True)
print(f"""\nThe install of the datasets completed successfully.""")
try: reinstall = dbutils.widgets.get("reinstall").lower() == "true"
except Exception: reinstall = False
install_exercise_datasets(reinstall)
print(f"\nYour Registration ID is {registration_id}")
# COMMAND ----------
# Setup Bronze
from pyspark.sql import DataFrame
import time
def realityCheckBronze(writeToBronze):
global bronze_tests
bronze_tests = TestSuite()
dbutils.fs.rm(outputPathBronzeTest, True)
dbutils.fs.rm(f"{outputPathBronzeTest}_checkpoint", True)
try:
writeToBronze(Key.singleStreamDF, outputPathBronzeTest, "bronze_test")
def groupAndCount(df: DataFrame):
return df.select('eventName').groupBy('eventName').count()
for s in spark.streams.active:
if s.name == "bronze_test":
first = True
while (len(s.recentProgress) == 0):
if first:
print("waiting for stream to start...")
first = False
time.sleep(5)
try:
testDF = (spark
.read
.format("delta")
.load(outputPathBronzeTest))
except Exception as e:
print(e)
testDF = (spark
.read
.load(outputPathBronzeTest))
test_dtype = findColumnDatatype(testDF, 'eventDate')
historyDF = spark.sql("DESCRIBE HISTORY delta.`{}`".format(outputPathBronzeTest))
bronze_tests.test(id = "rc_bronze_delta_format", points = 2, description = "Is in Delta format",
testFunction = lambda: isDelta(outputPathBronzeTest))
bronze_tests.test(id = "rc_bronze_contains_columns", points = 2, description = "Dataframe contains eventDate column",
testFunction = lambda: verifyColumnsExists(testDF, ['eventDate']))
bronze_tests.test(id = "rc_bronze_correct_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.bronzeDF.schema))
bronze_tests.test(id = "rc_bronze_column_check", points = 2, description = "eventDate column is correct data type",
testFunction = lambda: test_dtype == "date")
bronze_tests.test(id = "rc_bronze_null_check", points = 2, description = "Does not contain nulls",
testFunction = lambda: checkForNulls(testDF, 'eventParams'))
bronze_tests.test(id = "rc_bronze_is_streaming", points = 2, description = "Is streaming DataFrame",
testFunction = lambda: isStreamingDataframe(historyDF))
bronze_tests.test(id = "rc_bronze_output_mode", points = 2, description = "Output mode is Append",
testFunction = lambda: checkOutputMode(historyDF, "Append"))
bronze_tests.test(id = "rc_bronze_correct_rows", points = 2, description = "Returns a Dataframe with the correct number of rows",
testFunction = lambda: testDF.count() == Key.bronzeDF.count())
bronze_tests.test(id = "rc_bronze_correct_df", points = 2, description = "Returns the correct Dataframe",
testFunction = lambda: compareDataFrames(groupAndCount(testDF), groupAndCount(Key.bronzeDF)))
daLogger.logTestSuite("Bronze Reality Check", registration_id, bronze_tests)
bronze_tests.displayResults()
finally:
for s in spark.streams.active:
if s.name == 'bronze_test':
try:
s.stop()
except Exception as e:
print('!!', e)
None
# COMMAND ----------
# Setup Static
def realityCheckStatic(loadStaticData):
global static_tests
static_tests = TestSuite()
testDF = loadStaticData(f"{source_path}/solutions/lookup")
static_tests.test(id = "rc_static_count", points = 2, description = "Has the correct number of rows",
testFunction = lambda: testDF.count() == 475)
static_tests.test(id = "rc_static_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.correctLookupDF.schema))
daLogger.logTestSuite("Static Reality Check", registration_id, static_tests)
static_tests.displayResults()
None
# COMMAND ----------
# Setup Silver
def realityCheckSilver(bronzeToSilver):
global silver_tests
silver_tests = TestSuite()
dbutils.fs.rm(outputPathSilverTest, True)
dbutils.fs.rm(f"{outputPathSilverTest}_checkpoint", True)
try:
bronzeToSilver(outputPathBronzeTest, outputPathSilverTest, "silver_test", Key.correctLookupDF)
def groupAndCount(df: DataFrame):
try:
return df.select('deviceType').groupBy('deviceType').count()
except:
print("deviceType not found")
for s in spark.streams.active:
first = True
while (len(s.recentProgress) == 0):
if first:
print("waiting for stream to start...")
first = False
time.sleep(5)
try:
testDF = (spark
.read
.format("delta")
.load(outputPathSilverTest))
except Exception as e:
testDF = (spark
.read
.load(outputPathSilverTest))
historyDF = spark.sql("DESCRIBE HISTORY delta.`{}`".format(outputPathSilverTest))
silver_tests.test(id = "rc_silver_delta_format", points = 2, description = "Is in Delta format",
testFunction = lambda: isDelta(outputPathSilverTest))
silver_tests.test(id = "rc_silver_contains_columns", points = 2, description = "Dataframe contains device_id, client_event_time, deviceType columns",
testFunction = lambda: verifyColumnsExists(testDF, ["device_id", "client_event_time", "deviceType"]))
silver_tests.test(id = "rc_silver_correct_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.silverDF.schema))
silver_tests.test(id = "rc_silver_null_check", points = 2, description = "Does not contain nulls",
testFunction = lambda: checkForNulls(testDF, "eventName"))
silver_tests.test(id = "rc_silver_is_streaming", points = 2, description = "Is streaming DataFrame",
testFunction = lambda: isStreamingDataframe(historyDF))
silver_tests.test(id = "rc_silver_output_mode", points = 2, description = "Output mode is Append",
testFunction = lambda: checkOutputMode(historyDF, "Append"))
silver_tests.test(id = "rc_silver_correct_rows", points = 2, description = "Returns a Dataframe with the correct number of rows",
testFunction = lambda: testDF.count() == Key.silverDF.count())
silver_tests.test(id = "rc_silver_correct_df", points = 2, description = "Returns the correct Dataframe",
testFunction = lambda: compareDataFrames(groupAndCount(testDF), groupAndCount(Key.silverDF)))
daLogger.logTestSuite("Silver Reality Check", registration_id, silver_tests)
silver_tests.displayResults()
finally:
for s in spark.streams.active:
if s.name == 'silver_test':
s.stop()
None
# COMMAND ----------
# Setup Gold
def realityCheckGold(silverToGold):
global gold_tests
gold_tests = TestSuite()
dbutils.fs.rm(outputPathGoldTest, True)
dbutils.fs.rm(f"{outputPathGoldTest}_checkpoint", True)
try:
silverToGold(outputPathSilverTest, outputPathGoldTest, "gold_test")
for s in spark.streams.active:
first = True
while (len(s.recentProgress) == 0):
if first:
print("waiting for stream to start...")
first = False
time.sleep(5)
try:
testDF = (spark
.read
.format("delta")
.load(outputPathGoldTest))
except Exception as e:
testDF = (spark
.read
.load(outputPathGoldTest))
historyDF = spark.sql("DESCRIBE HISTORY delta.`{}`".format(outputPathGoldTest))
gold_tests.test(id = "rc_gold_delta_format", points = 2, description = "Is in Delta format",
testFunction = lambda: isDelta(outputPathGoldTest))
gold_tests.test(id = "rc_gold_contains_columns", points = 2, description = "Dataframe contains week and WAU columns",
testFunction = lambda: verifyColumnsExists(testDF, ["week", "WAU"]))
gold_tests.test(id = "rc_gold_correct_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.goldDF.schema))
gold_tests.test(id = "rc_gold_null_check", points = 2, description = "Does not contain nulls",
testFunction = lambda: checkForNulls(testDF, "eventName"))
gold_tests.test(id = "rc_gold_is_streaming", points = 2, description = "Is streaming DataFrame",
testFunction = lambda: isStreamingDataframe(historyDF))
gold_tests.test(id = "rc_gold_output_mode", points = 2, description = "Output mode is Complete",
testFunction = lambda: checkOutputMode(historyDF, "Complete"))
gold_tests.test(id = "rc_gold_correct_rows", points = 2, description = "Returns a Dataframe with the correct number of rows",
testFunction = lambda: testDF.count() == Key.goldDF.count())
gold_tests.test(id = "rc_gold_correct_df", points = 2, description = "Returns the correct Dataframe",
testFunction = lambda: compareDataFrames(testDF.sort("week"), Key.goldDF.sort("week")))
daLogger.logTestSuite("Gold Reality Check", registration_id, gold_tests)
gold_tests.displayResults()
finally:
for s in spark.streams.active:
if s.name == 'gold_test':
s.stop()
None
# COMMAND ----------
html_passed = f"""
<html>
<body>
<h2>Congratulations! You're all done!</h2>
While the preliminary evaluation of your project indicates that you have passed, we have a few more validation steps to run on the back-end:<br/>
<ul style="margin:0">
<li> Code & statistical analysis of your capstone project</li>
<li> Correlation of your account in our LMS via your email address, <b>{username}</b></li>
    <li> Final preparation of your badge</li>
</ul>
<p>Assuming there are no issues with our last few steps, you will receive your <b>Databricks Developer Essentials Badge</b> within 2 weeks. Notification will be made by email to <b>{username}</b> regarding the availability of your digital badge via <b>Accredible</b>.
Should we have any issues, such as not finding your email address in our LMS, we will do our best to resolve the issue using the email address provided here.
</p>
<p>Your digital badge will be available in a secure, verifiable, and digital format that you can easily retrieve via <b>Accredible</b>. You can then share your achievement via any number of different social media platforms.</p>
<p>If you have questions about the status of your badge after the initial two-week window, or if the email address listed above is incorrect, please <a href="https://help.databricks.com/s/contact-us?ReqType=training" target="_blank">submit a ticket</a> with the subject "Core Capstone" and your Registration ID (<b>{registration_id}</b>) in the message body. Please allow us 3-5 business days to respond.</p>
One final note: In order to comply with <a href="https://oag.ca.gov/privacy/ccpa" target="_blank">CCPA</a> and <a href="https://gdpr.eu/" target="_blank">GDPR</a>, which regulate the collection of your personal information, the status of this capstone and its correlation to your email address will be deleted within 30 days of its submission.
</body>
</html>
"""
html_failed = f"""
<html>
<body>
<h2>Almost There!</h2>
<p>Our preliminary evaluation of your project indicates that you have not passed.</p>
<p>In order for your project to be submitted <b>all</b> reality checks must pass.</p>
<p>In some cases this problem can be resolved by simply clearing the notebook's state (<b>Clear State & Results</b>) and then selecting <b>Run All</b> from the toolbar above.</p>
<p>If your project continues to fail validation, please review each step above to ensure that you have properly addressed all the corresponding requirements.</p>
</body>
</html>
"""
# Setup Final
def realityCheckFinal():
global final_passed
suite = TestSuite()
suite.testEquals(f"final.static-passed", "Reality Check Bronze passed", static_tests.passed, True)
suite.testEquals(f"final.bronze-passed", "Reality Check Static passed", bronze_tests.passed, True)
suite.testEquals(f"final.silver-passed", "Reality Check Silver passed", silver_tests.passed, True)
suite.testEquals(f"final.final-passed", "Reality Check Gold passed", gold_tests.passed, True)
final_passed = suite.passed
daLogger.logTestSuite("Final Reality Check", registration_id, suite)
daLogger.logAggregation("Capstone", registration_id, TestResultsAggregator)
suite.displayResults()
if final_passed and TestResultsAggregator.passed:
displayHTML(html_passed)
daLogger.logCompletion(registration_id, username)
else:
displayHTML(html_failed)
None
# COMMAND ----------
class CapstoneLogger:
def logTestResult(self, event_id, registration_id, result):
self.logEvent(event_id = event_id,
registration_id = registration_id,
description = result.test.description,
passed = result.passed,
points = result.points,
max_points = result.test.points)
def logTestSuite(self, event_id, registration_id, suite):
self.logEvent(event_id = event_id,
registration_id = registration_id,
description = None,
passed = suite.passed,
points = suite.score,
max_points = suite.maxScore)
def logAggregation(self, event_id, registration_id, aggregate):
self.logEvent(event_id = event_id,
registration_id = registration_id,
description = None,
passed = aggregate.passed,
points = aggregate.score,
max_points = aggregate.maxScore)
def logCompletion(self, registration_id:str, email_address:str):
import time, json, requests
try:
content = {
"registration_id": registration_id,
"email_address": email_address,
}
try:
response = requests.put(
url="https://rqbr3jqop0.execute-api.us-west-2.amazonaws.com/prod/capstone/completed",
json=content,
headers={
"Accept": "application/json; charset=utf-8",
"Content-Type": "application/json; charset=utf-8"
})
assert response.status_code == 200, f"Expected HTTP response code 200, found {response.status_code}"
except requests.exceptions.RequestException as e:
raise Exception("Exception sending message") from e
except Exception as e:
raise Exception("Exception constructing message") from e
def logEvent(self, event_id:str, registration_id:str, description:str, passed:str, points:int, max_points:int):
import time, json, requests
try:
content = {
"module_name": "essentials-capstone-v2",
"lesson_name": "Capstone",
"language": "python",
"event_id": event_id,
"event_time": f"{BI.int(BI.round((time.time() * 1000)))}",
"registration_id": registration_id,
"description": description,
"passed": passed,
"points": points,
"max_points": max_points,
}
try:
response = requests.post(
url="https://rqbr3jqop0.execute-api.us-west-2.amazonaws.com/prod/capstone/status",
json=content,
headers={
"Accept": "application/json; charset=utf-8",
"Content-Type": "application/json; charset=utf-8"
})
assert response.status_code == 200, f"Expected HTTP response code 200, found {response.status_code}"
except requests.exceptions.RequestException as e:
raise Exception("Exception sending message") from e
except Exception as e:
raise Exception("Exception constructing message") from e
daLogger = CapstoneLogger()
None
# COMMAND ----------
# These imports are OK to provide for students
import pyspark
from typing import Callable, Any, Iterable, List, Set, Tuple
import uuid
#############################################
# Test Suite classes
#############################################
# Test case
class TestCase(object):
__slots__=('description', 'testFunction', 'id', 'uniqueId', 'dependsOn', 'escapeHTML', 'points')
def __init__(self,
description:str,
testFunction:Callable[[], Any],
id:str=None,
dependsOn:Iterable[str]=[],
escapeHTML:bool=False,
points:int=1):
self.description=description
self.testFunction=testFunction
self.id=id
self.dependsOn=dependsOn
self.escapeHTML=escapeHTML
self.points=points
# Test result
class TestResult(object):
__slots__ = ('test', 'skipped', 'debug', 'passed', 'status', 'points', 'exception', 'message')
def __init__(self, test, skipped = False, debug = False):
try:
self.test = test
self.skipped = skipped
self.debug = debug
if skipped:
self.status = 'skipped'
self.passed = False
self.points = 0
else:
assert test.testFunction() != False, "Test returned false"
self.status = "passed"
self.passed = True
self.points = self.test.points
self.exception = None
self.message = ""
except Exception as e:
self.status = "failed"
self.passed = False
self.points = 0
self.exception = e
self.message = repr(self.exception)
if (debug and not isinstance(e, AssertionError)):
raise e
# Decorator to lazy evaluate - used by TestSuite
def lazy_property(fn):
'''Decorator that makes a property lazy-evaluated.
'''
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
testResultsStyle = """
<style>
table { text-align: left; border-collapse: collapse; margin: 1em; caption-side: bottom; font-family: Sans-Serif; font-size: 16px}
caption { text-align: left; padding: 5px }
th, td { border: 1px solid #ddd; padding: 5px }
th { background-color: #ddd }
.passed { background-color: #97d897 }
.failed { background-color: #e2716c }
.skipped { background-color: #f9d275 }
.results .points { display: none }
.results .message { display: none }
.results .passed::before { content: "Passed" }
.results .failed::before { content: "Failed" }
.results .skipped::before { content: "Skipped" }
.grade .passed .message:empty::before { content:"Passed" }
.grade .failed .message:empty::before { content:"Failed" }
.grade .skipped .message:empty::before { content:"Skipped" }
</style>
""".strip()
# Test suite class
class TestSuite(object):
def __init__(self) -> None:
self.ids = set()
self.testCases = list()
@lazy_property
def testResults(self) -> List[TestResult]:
return self.runTests()
def runTests(self, debug=False) -> List[TestResult]:
import re
failedTests = set()
testResults = list()
for test in self.testCases:
skip = any(testId in failedTests for testId in test.dependsOn)
result = TestResult(test, skip, debug)
if (not result.passed and test.id != None):
failedTests.add(test.id)
if result.test.id: event_id = "Test-"+result.test.id
elif result.test.description: event_id = "Test-"+re.sub("[^a-zA-Z0-9_]", "", result.test.description)
else: event_id = "Test-"+str(uuid.uuid1())
daLogger.logTestResult(event_id, registration_id, result)
testResults.append(result)
TestResultsAggregator.update(result)
return testResults
def _display(self, cssClass:str="results", debug=False) -> None:
from html import escape
testResults = self.testResults if not debug else self.runTests(debug=True)
lines = []
lines.append(testResultsStyle)
lines.append("<table class='"+cssClass+"'>")
lines.append(" <tr><th class='points'>Points</th><th class='test'>Test</th><th class='result'>Result</th></tr>")
for result in testResults:
resultHTML = "<td class='result "+result.status+"'><span class='message'>"+result.message+"</span></td>"
descriptionHTML = escape(str(result.test.description)) if (result.test.escapeHTML) else str(result.test.description)
lines.append(" <tr><td class='points'>"+str(result.points)+"</td><td class='test'>"+descriptionHTML+"</td>"+resultHTML+"</tr>")
lines.append(" <caption class='points'>Score: "+str(self.score)+"</caption>")
lines.append("</table>")
html = "\n".join(lines)
displayHTML(html)
def displayResults(self) -> None:
self._display("results")
def grade(self) -> int:
self._display("grade")
return self.score
def debug(self) -> None:
self._display("grade", debug=True)
@lazy_property
def score(self) -> int:
    return BI.sum(map(lambda result: result.points, self.testResults))
@lazy_property
def maxScore(self) -> int:
    return BI.sum(map(lambda result: result.test.points, self.testResults))
@lazy_property
def percentage(self) -> int:
return 0 if self.maxScore == 0 else int(100.0 * self.score / self.maxScore)
@lazy_property
def passed(self) -> bool:
return self.percentage == 100
def addTest(self, testCase: TestCase):
    if not testCase.id: raise ValueError("The test case's id must be specified")
if testCase.id in self.ids: raise ValueError(f"Duplicate test case id: {testCase.id}")
self.testCases.append(testCase)
self.ids.add(testCase.id)
return self
def test(self, id:str, description:str, testFunction:Callable[[], Any], points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testEquals(self, id:str, description:str, valueA, valueB, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: valueA == valueB
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testFloats(self, id:str, description:str, valueA, valueB, tolerance=0.01, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: compareFloats(valueA, valueB, tolerance)
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testRows(self, id:str, description:str, rowA: pyspark.sql.Row, rowB: pyspark.sql.Row, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: compareRows(rowA, rowB)
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testDataFrames(self, id:str, description:str, dfA: pyspark.sql.DataFrame, dfB: pyspark.sql.DataFrame, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: compareDataFrames(dfA, dfB)
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testContains(self, id:str, description:str, listOfValues, value, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: value in listOfValues
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
class __TestResultsAggregator(object):
testResults = dict()
def update(self, result:TestResult):
self.testResults[result.test.id] = result
return result
@lazy_property
def score(self) -> int:
    return BI.sum(map(lambda result: result.points, self.testResults.values()))
@lazy_property
def maxScore(self) -> int:
    return BI.sum(map(lambda result: result.test.points, self.testResults.values()))
@lazy_property
def percentage(self) -> int:
return 0 if self.maxScore == 0 else int(100.0 * self.score / self.maxScore)
@lazy_property
def passed(self) -> bool:
return self.percentage == 100
def displayResults(self):
displayHTML(testResultsStyle + f"""
<table class='results'>
<tr><th colspan="2">Test Summary</th></tr>
<tr><td>Number of Passing Tests</td><td style="text-align:right">{self.score}</td></tr>
<tr><td>Number of Failing Tests</td><td style="text-align:right">{self.maxScore-self.score}</td></tr>
<tr><td>Percentage Passed</td><td style="text-align:right">{self.percentage}%</td></tr>
</table>
""")
# Lazy-man's singleton
TestResultsAggregator = __TestResultsAggregator()
None
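# Hedged usage sketch (not part of the original notebook): the TestSuite API
# chains registrations fluently and renders an HTML score table, e.g.
#   suite = TestSuite()
#   suite.test(id="demo", description="1 + 1 == 2",
#              testFunction=lambda: 1 + 1 == 2, points=1)
#   suite.displayResults()  # relies on the notebook-provided displayHTML()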
# COMMAND ----------
from pyspark.sql import Row, DataFrame
def returnTrue():
return True
def compareFloats(valueA, valueB, tolerance=0.01):
# Usage: compareFloats(valueA, valueB) (uses default tolerance of 0.01)
# compareFloats(valueA, valueB, tolerance=0.001)
from builtins import abs
try:
if (valueA == None and valueB == None):
return True
else:
return abs(float(valueA) - float(valueB)) <= tolerance
except:
return False
def compareRows(rowA: Row, rowB: Row):
# Usage: compareRows(rowA, rowB)
# compares two Dictionaries
if (rowA == None and rowB == None):
return True
elif (rowA == None or rowB == None):
return False
else:
return rowA.asDict() == rowB.asDict()
def compareDataFrames(dfA: DataFrame, dfB: DataFrame):
from functools import reduce
# Usage: compareDataFrames(dfA, dfB)
if (dfA == None and dfB == None):
return True
else:
n = dfA.count()
if (n != dfB.count()):
return False
kv1 = dfA.rdd.zipWithIndex().map(lambda t : (t[1], t[0])).collectAsMap()
kv2 = dfB.rdd.zipWithIndex().map(lambda t : (t[1], t[0])).collectAsMap()
    d = {}
    for k in kv1.keys():
      # pair row i of dfA with row i of dfB (clearer than shadowing d in a genexp)
      d[k] = (kv1[k], kv2[k])
return reduce(lambda a, b: a and b, [compareRows(rowTuple[0], rowTuple[1]) for rowTuple in d.values()])
def checkSchema(schemaA, schemaB, keepOrder=True, keepNullable=False):
# Usage: checkSchema(schemaA, schemaB, keepOrder=false, keepNullable=false)
from pyspark.sql.types import StructField
if (schemaA == None and schemaB == None):
return True
elif (schemaA == None or schemaB == None):
return False
else:
schA = schemaA
schB = schemaB
if (keepNullable == False):
schA = [StructField(s.name, s.dataType) for s in schemaA]
schB = [StructField(s.name, s.dataType) for s in schemaB]
if (keepOrder == True):
return [schA] == [schB]
else:
return set(schA) == set(schB)
None
# COMMAND ----------
from pyspark.sql import DataFrame
from pyspark.sql.functions import col, sum
import os
def verifyColumnsExists(df: DataFrame, columnNames):
return all(col in df.columns for col in columnNames)
def findColumnDatatype(df: DataFrame, columnName):
try:
return df.select(columnName).dtypes[0][1]
except Exception as e:
return False
def isDelta(path):
found = False
for file in dbutils.fs.ls(path):
if file.name == "_delta_log/":
found = True
return found
def checkForNulls(df: DataFrame, columnName):
  try:
    nullCount = df.select(sum(col(columnName).isNull().astype(IntegerType())).alias('nullCount')).collect()[0].nullCount
    if (nullCount > 0):
      return False
    return True
  except Exception as e:
    return True
def isStreamingDataframe(df: DataFrame):
return df.take(1)[0].operation == "STREAMING UPDATE"
def checkOutputMode(df: DataFrame, mode):
return df.take(1)[0].operationParameters['outputMode'] == mode
print("Finished setting up the capstone environment.")
| 38.544084 | 408 | 0.669586 | 11,477 | 0.345433 | 0 | 0 | 1,173 | 0.035305 | 0 | 0 | 11,113 | 0.334477 |
eb424108a96bf604264def77319d83c190ad7040 | 12,658 | py | Python | scraper/Scraper.py | tiskutis/Capstone24Scraper | 3182463e129f37f0f895a440d2285a51e0cfb9a2 | [
"MIT"
]
| null | null | null | scraper/Scraper.py | tiskutis/Capstone24Scraper | 3182463e129f37f0f895a440d2285a51e0cfb9a2 | [
"MIT"
]
| null | null | null | scraper/Scraper.py | tiskutis/Capstone24Scraper | 3182463e129f37f0f895a440d2285a51e0cfb9a2 | [
"MIT"
]
| null | null | null | import requests
from bs4 import BeautifulSoup as bs, BeautifulSoup
import pandas as pd
import numpy as np
import re
import logging
class Scraper:
"""
This is a scraper class, which can scrape California housing information from https://www.point2homes.com/ website.
The flow:
- First, all California areas are extracted and put into a list.
- Area list is iterated over. Each area has a number of pages with real estate descriptions. User can select how
many pages he wants to go through.
- Scraper visits every real estate link in the page and scrapes required information. After all houses are scraped,
scraper moves to the next page. When no more pages are left or user denoted page limit is reached, scraper
moves to the next category.
"""
def __init__(
self,
logger=logging.basicConfig(
filename="scraping.log", filemode="w", level=logging.DEBUG
),
basic_url: str = "https://www.point2homes.com",
):
"""
Initialization method
:param logger: text file to log events
:param basic_url: url used for to construct new urls.
"""
self.logger = logger
self.basic_url = basic_url
@staticmethod
def get_page(url_: str) -> BeautifulSoup or None:
"""
Gets page HTML from the provided url
:param url_: page you want to scrape from;
:return: get_page() method queries the provided url and returns response, processed with beautiful soup library;
if response is not ok, response status_code is printed and None is returned.
"""
logging.info(f"Getting url: {url_}")
response = requests.get(url_, headers={"User-Agent": "Mozilla/5.0"})
if not response.ok:
logging.error(f"Server response: {response.status_code}")
return None
else:
return bs(response.text, "lxml")
@staticmethod
def get_location_urls(soup: BeautifulSoup) -> list:
"""
Finds all location links in a page and puts them in a list
:param soup: BeautifulSoup object
:return: list with location urls
"""
location_urls_ = []
for elem_ in soup.find_all("a", class_="psrk-events"):
if elem_["href"] not in location_urls_ and "CA" in elem_["href"]:
location_urls_.append(elem_["href"])
return location_urls_
@staticmethod
def get_price(soup: BeautifulSoup) -> float:
"""
Extracts price from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: price of type int or np.nan if not found
"""
try:
price = int(
re.findall(
r"[0-9][0-9,.]+",
soup.find("div", class_="price").get_text().strip(),
)[0].replace(",", "")
)
except Exception as err:
logging.warning(f"Price not found. Error message: {err}")
return np.nan
return price
@staticmethod
def get_bedrooms(soup: BeautifulSoup) -> int or float:
"""
Extracts number of bedrooms from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: number of bedrooms of type int or np.nan if not found
"""
try:
bedrooms = int(
re.findall(
r"\d+", soup.find("li", class_="ic-beds").get_text().strip()
)[0]
)
except Exception as err:
logging.warning(f"Bedroom not found. Error message: {err}")
return np.nan
return bedrooms
@staticmethod
def get_baths(soup: BeautifulSoup) -> int or float:
"""
Extracts number of baths from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: number of baths of type int or np.nan if not found
"""
try:
baths = int(
re.findall(
r"\d+", soup.find("li", class_="ic-baths").get_text().strip()
)[0]
)
except Exception as err:
logging.warning(f"Bath not found. Error message: {err}")
return np.nan
return baths
@staticmethod
def get_sqm(soup: BeautifulSoup) -> float:
"""
Extracts house size in square meters from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: house size in square meters or np.nan if not found
"""
try:
sqm = round(
float(
re.findall(
r"[0-9][0-9,.]+",
soup.find("li", class_="ic-sqft").get_text().strip(),
)[0].replace(",", "")
)
/ 10.764,
2,
)
except Exception as err:
logging.warning(f"Sqm not found. Error message: {err}")
return np.nan
return sqm
@staticmethod
def get_lot_size(soup: BeautifulSoup) -> float:
"""
Extracts lot size in acres from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: lot size in acres or np.nan if not found
"""
try:
lot_size = float(
re.findall(
r"[0-9][0-9,.]+",
soup.find("li", class_="ic-lotsize").get_text().strip(),
)[0]
)
except Exception as err:
logging.warning(f"Lot size not found. Error message: {err}")
return np.nan
return lot_size
@staticmethod
def description_dictionary(soup: BeautifulSoup) -> dict:
"""
Extracts description information, contained in dt and dd elements
:param soup: BeautifulSoup object
:return: dictionary with dt as keys and dd as values
"""
dt_data = soup.find_all("dt")
dd_data = soup.find_all("dd")
description = {}
for dt, dd in zip(dt_data, dd_data):
description[dt.get_text().strip()] = dd.get_text().strip()
return description
@staticmethod
def demographics_dictionary(soup: BeautifulSoup) -> dict:
"""
Extracts demographics information, contained in td
:param soup: BeautifulSoup object
:return: dictionary with demographics in that area keys (e.g. median income, median age) and values
"""
demographics = soup.find("div", {"id": "demographics_content"}).find_all("td")
demographics_ = {}
for i in range(0, len(demographics), 2):
demographics_[demographics[i].get_text()] = demographics[i + 1].get_text()
return demographics_
def scrape_info_one_house(self, soup: BeautifulSoup) -> dict or None:
"""
Accepts soup object which contains all the required information about one house.
Scrapes house type, year built, parking spaces, area population, median age, total households,
median year built, median household income, number of baths and bedrooms, size in square meters, lot size in
acres and price.
:param soup: BeautifulSoup object
:return: dictionary with all the required info
"""
house_information = {}
try:
description = self.description_dictionary(soup)
demographics = self.demographics_dictionary(soup)
house_information["Type"] = description["Type"]
house_information["Year Built"] = description["Year Built"]
house_information["Parking Spaces"] = int(
re.findall(r"\d+", description["Parking info"])[0]
)
house_information["Area population"] = int(
demographics["Total population"].replace(",", "")
)
house_information["Median age"] = demographics["Median age"]
house_information["Total households"] = int(
demographics["Total households"].replace(",", "")
)
house_information["Median year built"] = demographics["Median year built"]
house_information["Median household income"] = int(
demographics["Median household income"].replace(",", "")
)
house_information["Bedrooms"] = self.get_bedrooms(soup)
house_information["Baths"] = self.get_baths(soup)
house_information["Square Meters"] = self.get_sqm(soup)
house_information["Lot size (acres)"] = self.get_lot_size(soup)
house_information["Price"] = self.get_price(soup)
return house_information
except Exception as err:
logging.warning(
f"Some of the required information was missing for this house. Error message: {err}"
)
return None
def get_houses_in_location(
self,
location_url_: str,
        houses_in_location: set = None,
page_limit: int = 1,
page_number: int = 1,
) -> list:
"""
Accepts location url and goes through pages in that location scraping every house
until page limit is reached. Returns list of dicts with scraped information about every house in that location.
:param location_url_: string with link to specific location in California state
:param houses_in_location: set with already scraped links. Since retrieved links can be repetitive, there is
no need to go to the same link which has already been scraped. Set is used for faster search
        :param page_limit: how many pages to scrape. If not passed by the user, default is 1
:param page_number: Current page to scrape. Starting number is 1
:return: list of dictionaries
"""
        if houses_in_location is None:
            # a fresh set per call avoids the shared mutable-default pitfall
            houses_in_location = set()
        houses_information = []
try:
new_url = self.basic_url + location_url_ + f"?page={page_number}"
page_ = self.get_page(new_url)
if page_.find_all("li", class_="lslide"):
for elem in page_.find_all("li", class_="lslide"):
link = elem.find("a")["href"]
if link.startswith("/US") and link not in houses_in_location:
houses_information.append(
self.scrape_info_one_house(
self.get_page(self.basic_url + link)
)
)
houses_in_location.add(link)
            if page_number < page_limit:
                page_number += 1
                # keep the recursion's results; discarding the return value
                # here would lose every house beyond the first page
                houses_information.extend(
                    self.get_houses_in_location(
                        location_url_,
                        houses_in_location,
                        page_limit,
                        page_number=page_number,
                    )
                )
except Exception as err:
logging.error(f"Error occurred while scraping locations. Message: {err}")
return houses_information
def scrape_platform(self, page_limit: int = 1) -> None:
"""
Main scraping function. Accepts page limit - how many pages to scrape, default is 1.
The flow:
- First, all California areas (locations) are extracted and put into a list.
- Area list is iterated over. Each area has a number of pages with real estate descriptions. User can select how
many pages he wants to go through.
- Scraper visits every real estate link in the page and scrapes required information. After all houses are scraped,
scraper moves to the next page. When no more pages are left or user denoted page limit is reached, scraper
moves to the next category.
:param page_limit: how many pages to scrape per area
:return: None.
"""
starting_url = "https://www.point2homes.com/US/Real-Estate-Listings/CA.html"
houses = []
starting_page = self.get_page(starting_url)
locations = self.get_location_urls(starting_page)
for location in locations:
houses.extend(
self.get_houses_in_location(location, set(), page_limit=page_limit)
)
self.to_dataframe(houses).to_csv("California Housing.csv")
@staticmethod
def to_dataframe(house_list: list) -> pd.DataFrame:
"""
Filters out None values and converts the list to pandas DataFrame
:param house_list: list of dictionaries
:return: pandas DataFrame
"""
return pd.DataFrame([house for house in house_list if house is not None])
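# Hedged usage sketch (not part of the original module): a one-page scrape of
# every California area, written out as "California Housing.csv". Assumes
# network access and the point2homes markup the class was written against.
if __name__ == "__main__":
    scraper = Scraper()
    scraper.scrape_platform(page_limit=1)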
| 38.241692 | 123 | 0.586902 | 12,524 | 0.989414 | 0 | 0 | 5,874 | 0.464054 | 0 | 0 | 5,903 | 0.466345 |
eb42e8c815ef79c9ee2b0e9d574f89c917610639 | 693 | py | Python | ArticleSpider/ArticleSpider/utils/selenium_spider.py | ms-wu/Scrapy_projects | 376eb5e1c6eca54bcfb781170513c8e9d3476fec | [
"MIT"
]
| null | null | null | ArticleSpider/ArticleSpider/utils/selenium_spider.py | ms-wu/Scrapy_projects | 376eb5e1c6eca54bcfb781170513c8e9d3476fec | [
"MIT"
]
| null | null | null | ArticleSpider/ArticleSpider/utils/selenium_spider.py | ms-wu/Scrapy_projects | 376eb5e1c6eca54bcfb781170513c8e9d3476fec | [
"MIT"
]
| null | null | null | from selenium import webdriver
from scrapy.selector import Selector
import time
chrome_opt = webdriver.ChromeOptions()
prefs = {"profile.managed_default_content_settings.images": 2}
chrome_opt.add_experimental_option("prefs", prefs)
browser = webdriver.Chrome(executable_path="H:\chromedriver.exe", chrome_options=chrome_opt)
browser.get("https://www.taobao.com")
# time.sleep(5)
# browser.find_element_by_css_selector()
# t_selector = Selector(text=browser.page_source)
# t_selector.css()
# for i in range(3):
# browser.execute_script("window.scrollTo(0, document.body.scrollHeight); var lenOfPage=document.body.scrollHeight; return lenOfPage;")
# time.sleep(3)
# browser.quit() | 31.5 | 139 | 0.780664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 417 | 0.601732 |
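# Hedged note (not in the original file): Selenium 4 removed the
# executable_path and chrome_options keyword arguments; an equivalent
# construction there would be, for example:
# from selenium.webdriver.chrome.service import Service
# browser = webdriver.Chrome(service=Service("H:\chromedriver.exe"), options=chrome_opt)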
eb4407cbcc3f00735c03c065582c4a89413734d8 | 1,678 | py | Python | launcher.py | dlario/PyFlow | b53b9d14b37aa586426d85842c6cd9a9c35443f2 | [
"MIT"
]
| null | null | null | launcher.py | dlario/PyFlow | b53b9d14b37aa586426d85842c6cd9a9c35443f2 | [
"MIT"
]
| null | null | null | launcher.py | dlario/PyFlow | b53b9d14b37aa586426d85842c6cd9a9c35443f2 | [
"MIT"
]
| null | null | null | from nine import str
from Qt.QtWidgets import QApplication, QStyleFactory
from Qt import QtGui
from Qt import QtCore
import sys
import os
from PyFlow.App import PyFlow
FILE_DIR = os.path.abspath(os.path.dirname(__file__))
SETTINGS_PATH = os.path.join(FILE_DIR, "PyFlow", "appConfig.ini")
STYLE_PATH = os.path.join(FILE_DIR, "PyFlow", "style.css")
app = QApplication(sys.argv)
app.setStyle(QStyleFactory.create("plastique"))
dark_palette = app.palette()
dark_palette.setColor(QtGui.QPalette.Window, QtGui.QColor(53, 53, 53))
dark_palette.setColor(QtGui.QPalette.WindowText, QtCore.Qt.white)
dark_palette.setColor(QtGui.QPalette.Base, QtGui.QColor(25, 25, 25))
dark_palette.setColor(QtGui.QPalette.AlternateBase, QtGui.QColor(53, 53, 53))
dark_palette.setColor(QtGui.QPalette.ToolTipBase, QtCore.Qt.white)
dark_palette.setColor(QtGui.QPalette.ToolTipText, QtCore.Qt.white)
dark_palette.setColor(QtGui.QPalette.Text, QtCore.Qt.black)
dark_palette.setColor(QtGui.QPalette.Button, QtGui.QColor(53, 53, 53))
dark_palette.setColor(QtGui.QPalette.ButtonText, QtCore.Qt.black)
dark_palette.setColor(QtGui.QPalette.BrightText, QtCore.Qt.red)
dark_palette.setColor(QtGui.QPalette.Link, QtGui.QColor(42, 130, 218))
dark_palette.setColor(QtGui.QPalette.Highlight, QtGui.QColor(42, 130, 218))
dark_palette.setColor(QtGui.QPalette.HighlightedText, QtCore.Qt.black)
app.setPalette(dark_palette)
try:
with open(STYLE_PATH, 'r') as f:
styleString = f.read()
app.setStyleSheet(styleString)
except Exception as e:
print(e)
instance = PyFlow.instance()
app.setActiveWindow(instance)
instance.show()
try:
sys.exit(app.exec_())
except Exception as e:
print(e)
| 33.56 | 77 | 0.781883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.033373 |
eb444f1d2f4c6079bc153578e3e68294eef319a0 | 4,344 | py | Python | src/gapminder_challenge/dashboard/dash_app2.py | UBC-MDS/gapminder_challenge | bbc8132a475d483e7c6c46572c8efca40b506afc | [
"MIT"
]
| 1 | 2022-03-19T03:31:49.000Z | 2022-03-19T03:31:49.000Z | src/gapminder_challenge/dashboard/dash_app2.py | imtvwy/gapminder_challenge | 0f7d9816b0c5baf6422baff24e0413c800d6e62a | [
"MIT"
]
| 39 | 2022-02-17T05:04:48.000Z | 2022-03-19T21:37:20.000Z | src/gapminder_challenge/dashboard/dash_app2.py | imtvwy/gapminder_challenge | 0f7d9816b0c5baf6422baff24e0413c800d6e62a | [
"MIT"
]
| 1 | 2022-03-19T03:30:08.000Z | 2022-03-19T03:30:08.000Z | import pandas as pd
from dash import Dash, html, dcc, Input, Output
import altair as alt
df = pd.read_csv('../../data/raw/world-data-gapminder_raw.csv') # local run
# df = pd.read_csv('data/raw/world-data-gapminder_raw.csv') # heroku deployment
url = '/dash_app2/'
def add_dash(server):
"""
    It creates a Dash app that plots a line chart of children per woman from the
    gapminder dataset, with two widgets: a range slider for years and a dropdown filter
:param server: The Flask app object
:return: A Dash server
"""
app = Dash(server=server, url_base_pathname=url)
app.layout = html.Div([
html.Iframe(
id='line_children',
style={'border-width': '0', 'width': '600px', 'height': '400px', 'display': 'block',
'margin-left': 'auto', 'margin-right': 'auto'}),
html.Label([
'Zoom in years: ',
dcc.RangeSlider(1918, 2018, 10, value=[1918, 2018], id='year_range_slider',
marks={str(year): str(year) for year in range(1918, 2028, 10)}),
]),
html.Label([
'See breakdown number by: ',
dcc.Dropdown(options=[
{'label': 'All', 'value': 'all'},
{'label': 'Income Group', 'value': 'income_group'},
{'label': 'Region', 'value': 'region'}
],
value='', id='filter_dropdown')
]),
html.Div(id="data_card_2", **{'data-card_2_data': []})
])
# Set up callbacks/backend
@app.callback(
Output('line_children', 'srcDoc'),
Input('year_range_slider', 'value'),
Input('filter_dropdown', 'value')
)
def update_line(year_range_slider, filter_dropdown):
"""
        The function takes in a year range and a filter option and outputs the
        children-per-woman line chart for that year range with the filter applied
:param year_range_slider: The year range to plot
:param filter_dropdown: The filter to plot
:return: The Altair chart is being returned.
"""
filter = filter_dropdown
title_params = alt.TitleParams("Average Number of Children", subtitle=[
"Click on legend entries to mute the corresponding lines"])
if filter == "all" or filter == '':
df_by_year = df.groupby(["year"]).mean()
df_by_year = df_by_year.reset_index()
chart = alt.Chart(df_by_year.query(f'year>={year_range_slider[0]} and year<={year_range_slider[1]}'),
title="Average Number of Children").mark_line().encode(
y=alt.Y("children_per_woman", title="Children per woman"),
x=alt.X("year", title="Year"),
strokeWidth=alt.value(3),
tooltip=['year', 'children_per_woman']).interactive()
else:
# group by filter field and then year to get the average
df_by_year = df.groupby([filter, "year"]).mean()
df_by_year = df_by_year.reset_index()
# add interactive click
click = alt.selection_multi(fields=[filter], bind='legend')
chart = alt.Chart(df_by_year.query(f'year>={year_range_slider[0]} and year<={year_range_slider[1]}'),
title=title_params).mark_line().encode(
y=alt.Y("children_per_woman", title="Children per woman"),
x=alt.X("year", title="Year"),
strokeWidth=alt.value(3),
# color=filter,
color=alt.Color(filter, title=filter.replace('_', ' ').title()),
opacity=alt.condition(click, alt.value(0.9), alt.value(0.2)),
tooltip=['year', 'children_per_woman']).interactive().add_selection(click)
return chart.to_html()
@app.callback(
Output('data_card_2', 'data-card_2_data'),
Input('filter_dropdown', 'value'))
def get_data(filter_dropdown="income_group"):
if filter_dropdown == '':
filter_dropdown = 'income_group'
df_by_year = df.groupby([filter_dropdown, "year"]).mean()
df_viz = df_by_year.reset_index()
df_viz = df_viz[[filter_dropdown, 'year', 'children_per_woman']]
df_viz = df_viz.to_json()
        return df_viz
return app.server
| 42.174757 | 113 | 0.575506 | 0 | 0 | 0 | 0 | 2,773 | 0.638352 | 0 | 0 | 1,781 | 0.409991 |
eb448a448b8928b4d93cd021756f058d5d672505 | 4,595 | py | Python | emulator/utils/common.py | Harry45/emuPK | c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9 | [
"MIT"
]
| 2 | 2021-05-10T16:59:34.000Z | 2021-05-19T16:10:24.000Z | emulator/utils/common.py | Harry45/emuPK | c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9 | [
"MIT"
]
| null | null | null | emulator/utils/common.py | Harry45/emuPK | c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9 | [
"MIT"
]
| 2 | 2021-04-16T23:55:16.000Z | 2021-09-09T12:48:41.000Z | # Author: Arrykrishna Mootoovaloo
# Collaborators: Alan Heavens, Andrew Jaffe, Florent Leclercq
# Email : [email protected]
# Affiliation : Imperial Centre for Inference and Cosmology
# Status : Under Development
'''
Perform all additional operations such as interpolations
'''
import os
import logging
import numpy as np
import scipy.interpolate as itp
from typing import Tuple
def indices(nzmax: int) -> Tuple[list, tuple]:
'''
Generates indices for double sum power spectra
:param: nzmax (int) - the maximum number of redshifts (assuming first redshift is zero)
:return: di_ee (list), idx_gi (tuple) - double indices for EE and indices for GI
'''
    # create empty lists to record all indices
    # for the EE power spectrum
    di_ee = []
    # for the GI power spectrum
# ab means alpha, beta
Lab_1 = []
Lab_2 = []
Lba_1 = []
Lba_2 = []
for i in range(1, nzmax + 1):
for j in range(1, nzmax + 1):
di_ee.append(np.min([i, j]))
if i > j:
Lab_1.append(i)
Lab_2.append(j)
elif j > i:
Lba_1.append(i)
Lba_2.append(j)
Lab_1 = np.asarray(Lab_1)
Lab_2 = np.asarray(Lab_2)
Lba_1 = np.asarray(Lba_1)
Lba_2 = np.asarray(Lba_2)
di_ee = np.asarray(di_ee)
idx_gi = (Lab_1, Lab_2, Lba_1, Lba_2)
return di_ee, idx_gi
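# Example (hedged, for illustration): for nzmax=2 the EE double sum runs over
# (i, j) in {1, 2} x {1, 2}, so indices(2) returns di_ee = [1, 1, 1, 2] and
# idx_gi = ([2], [1], [1], [2]) for the i > j and j > i pairs respectively.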
def dvalues(d: dict) -> np.ndarray:
'''
Returns an array of values instead of dictionary format
:param: d (dict) - a dictionary with keys and values
:return: v (np.ndarray) - array of values
'''
v = np.array(list(d.values()))
return v
def like_interp_2d(inputs: list, int_type: str = 'cubic') -> object:
'''
We want to predict the function for any new point of k and z (example)
:param: inputs (list) - a list containing x, y, f(x,y)
:param: int_type (str) - interpolation type (default: 'cubic')
:return: f (object) - the interpolator
'''
k, z, f_kz = np.log(inputs[0]), inputs[1], inputs[2]
inputs_trans = [k, z, f_kz]
f = itp.interp2d(*inputs_trans)
return f
def two_dims_interpolate(inputs: list, grid: list) -> np.ndarray:
'''
Function to perform 2D interpolation using interpolate.interp2d
:param: inputs (list) : inputs to the interpolation module, that is, we need to specify the following:
- x
- y
- f(x,y)
- 'linear', 'cubic', 'quintic'
:param: grid (list) : a list containing xnew and ynew
:return: pred_new (np.ndarray) : the predicted values on the 2D grid
'''
# check that all elements are greater than 0 for log-transformation to be used
condition = np.all(inputs[2] > 0)
if condition:
# transform k and f to log
k, z, f_kz, int_type = np.log(inputs[0]), inputs[1], np.log(inputs[2]), inputs[3]
else:
        # transform only k to log
k, z, f_kz, int_type = np.log(inputs[0]), inputs[1], inputs[2], inputs[3]
inputs_trans = [k, z, f_kz, int_type]
    # transform the grid to log
knew, znew = np.log(grid[0]), grid[1]
grid_trans = [knew, znew]
f = itp.interp2d(*inputs_trans)
if condition:
pred_new = np.exp(f(*grid_trans))
else:
pred_new = f(*grid_trans)
return pred_new
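# Example (hedged): for a strictly positive table f_kz sampled on coarse (k, z)
# grids, two_dims_interpolate([k, z, f_kz, 'cubic'], [knew, znew]) fits the
# spline to log(f) over log(k) and exponentiates the prediction back, while
# tables with non-positive entries are interpolated in linear f instead.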
def interpolate(inputs: list) -> np.ndarray:
'''
Function to interpolate the power spectrum along the redshift axis
:param: inputs (list or tuple) : x values, y values and new values of x
:return: ynew (np.ndarray) : an array of the interpolated power spectra
'''
x, y, xnew = inputs[0], inputs[1], inputs[2]
spline = itp.splrep(x, y)
ynew = itp.splev(xnew, spline)
return ynew
def get_logger(name: str, log_name: str, folder_name: str = 'logs'):
'''
    Create a log file for each Python script
    :param: name (str) - name of the Python script
    :param: log_name (str) - name of the output log file
    :param: folder_name (str) - folder in which log files are stored (default: 'logs')
    '''
# create the folder if it does not exist
if not os.path.exists(folder_name):
os.makedirs(folder_name)
log_format = '%(asctime)s %(name)8s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.DEBUG,
format=log_format,
filename=folder_name + '/' + log_name + '.log',
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(log_format))
logging.getLogger(name).addHandler(console)
return logging.getLogger(name)
| 24.972826 | 106 | 0.618498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,141 | 0.465941 |
eb458b4c5c0f75854528fff96d2061d078c5cbe7 | 2,984 | py | Python | pypy/translator/microbench/pybench/Imports.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
]
| 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/translator/microbench/pybench/Imports.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
]
| null | null | null | pypy/translator/microbench/pybench/Imports.py | woodrow/pyoac | b5dc59e6a38e7912db47f26fb23ffa4764a3c0e7 | [
"MIT"
]
| 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | from pybench import Test
# First imports:
import os
import package.submodule
class SecondImport(Test):
version = 0.1
operations = 5 * 5
rounds = 20000
def test(self):
for i in xrange(self.rounds):
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
import os
def calibrate(self):
for i in xrange(self.rounds):
pass
class SecondPackageImport(Test):
version = 0.1
operations = 5 * 5
rounds = 20000
def test(self):
for i in xrange(self.rounds):
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
import package
def calibrate(self):
for i in xrange(self.rounds):
pass
class SecondSubmoduleImport(Test):
version = 0.1
operations = 5 * 5
rounds = 20000
def test(self):
for i in xrange(self.rounds):
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
import package.submodule
def calibrate(self):
for i in xrange(self.rounds):
pass
| 21.314286 | 37 | 0.515416 | 2,862 | 0.959115 | 0 | 0 | 0 | 0 | 0 | 0 | 16 | 0.005362 |
de16d40373757db432c5c7a3e7d57eeddc1025cc | 1,745 | py | Python | tests/test_generators_rst.py | dbaty/soho | 3fe67d3dc52919751217d6e73be436c3e291ab04 | [
"BSD-3-Clause"
]
| null | null | null | tests/test_generators_rst.py | dbaty/soho | 3fe67d3dc52919751217d6e73be436c3e291ab04 | [
"BSD-3-Clause"
]
| 1 | 2015-10-11T10:34:08.000Z | 2015-10-11T10:34:08.000Z | tests/test_generators_rst.py | dbaty/soho | 3fe67d3dc52919751217d6e73be436c3e291ab04 | [
"BSD-3-Clause"
]
| null | null | null | from unittest import TestCase
class TestRSTGenerator(TestCase):
def _make_one(self):
from soho.generators.rst import RSTGenerator
return RSTGenerator()
def _call_generate(self, filename):
import os.path
generator = self._make_one()
here = os.path.dirname(__file__)
path = os.path.join(here, 'fixtures', filename)
return generator.generate(path)
def test_basics(self):
meta, html = self._call_generate('test1.rst')
self.assertEqual(meta, {})
self.assertEqual(html, '<p>This is a <strong>test</strong>.</p>')
def test_with_metadata_in_rst_file(self):
meta, html = self._call_generate('test2.rst')
self.assertEqual(meta, {'foo': 'Value of foo'})
self.assertEqual(html, '<p>This is another <strong>test</strong>.</p>')
def test_with_metadata_in_file(self):
meta, html = self._call_generate('test3.rst')
self.assertEqual(meta, {'foo': 'Inherited value of foo',
'bar': 'Overriden value of bar'})
self.assertEqual(html, '<p>This is another <strong>test</strong>.</p>')
def test_sphinx_directives(self):
meta, html = self._call_generate('test-code-block.rst')
expected = (
'<div class="highlight-python">'
'<table class="highlighttable"><tr>'
'<td class="linenos">'
'<div class="linenodiv"><pre>1</pre></div></td>'
'<td class="code">'
'<div class="highlight"><pre><span class="k">print</span> '
'<span class="s">'foo'</span>\n</pre></div>\n</td>'
'</tr>'
'</table>'
'</div>')
self.assertEqual(html, expected)
| 37.12766 | 79 | 0.581089 | 1,712 | 0.981089 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.330086 |
de170bec53f0702af41038f426ab0305ba516d45 | 206 | py | Python | wagtail_ab_testing/test/apps.py | alxbridge/wagtail-ab-testing | 1e959cc4ea1fa9b6d9adda2525fc3aae8e8b7807 | [
"BSD-3-Clause"
]
| 14 | 2021-02-19T08:52:37.000Z | 2022-03-16T05:16:38.000Z | wagtail_ab_testing/test/apps.py | alxbridge/wagtail-ab-testing | 1e959cc4ea1fa9b6d9adda2525fc3aae8e8b7807 | [
"BSD-3-Clause"
]
| 10 | 2021-04-09T16:16:17.000Z | 2022-03-31T17:30:18.000Z | wagtail_ab_testing/test/apps.py | alxbridge/wagtail-ab-testing | 1e959cc4ea1fa9b6d9adda2525fc3aae8e8b7807 | [
"BSD-3-Clause"
]
| 11 | 2021-04-23T15:19:06.000Z | 2022-03-28T16:15:14.000Z | from django.apps import AppConfig
class WagtailAbTestingTestAppConfig(AppConfig):
label = "wagtail_ab_testing_test"
name = "wagtail_ab_testing.test"
verbose_name = "Wagtail A/B Testing tests"
| 25.75 | 47 | 0.771845 | 169 | 0.820388 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.373786 |
de188ec6c9675e889154db140be0ba41e013c1c2 | 835 | py | Python | shc/__init__.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | [
"Apache-2.0"
]
| 5 | 2021-07-02T21:48:45.000Z | 2021-12-12T21:55:42.000Z | shc/__init__.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | [
"Apache-2.0"
]
| 49 | 2020-09-18T20:05:55.000Z | 2022-03-05T19:51:33.000Z | shc/__init__.py | fabaff/smarthomeconnect | 611cd0f372d03b5fc5798a2a9a5f962d1da72799 | [
"Apache-2.0"
]
| 1 | 2021-12-10T14:50:43.000Z | 2021-12-10T14:50:43.000Z | # Copyright 2020 Michael Thies <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
from . import base
from . import supervisor
from . import variables
from . import datatypes
from . import conversion
from . import timer
from .base import handler, blocking_handler
from .variables import Variable
from .supervisor import main
| 34.791667 | 120 | 0.777246 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 579 | 0.693413 |
de189e363b7152e9b3189460e86a65764a35ef55 | 65 | py | Python | minicds/setup_package.py | NESSAN-PMO/miniCDS | 82f08dfbb9259c78286679a0be875a66e2bedfe8 | [
"BSD-3-Clause"
]
| null | null | null | minicds/setup_package.py | NESSAN-PMO/miniCDS | 82f08dfbb9259c78286679a0be875a66e2bedfe8 | [
"BSD-3-Clause"
]
| 1 | 2020-10-29T19:56:05.000Z | 2020-10-29T19:56:05.000Z | minicds/setup_package.py | NESSAN-PMO/miniCDS | 82f08dfbb9259c78286679a0be875a66e2bedfe8 | [
"BSD-3-Clause"
]
| null | null | null |
def get_package_data():
return {"minicds":['minicds.cfg']}
| 13 | 38 | 0.646154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.338462 |
de1a03c3bf2d4b4418706f4fb2057bc7977a7251 | 777 | py | Python | client.py | juzejunior/HttpBasicServer | 7e77b49f693d9cfe0d782e93026d8f9261368b69 | [
"MIT"
]
| null | null | null | client.py | juzejunior/HttpBasicServer | 7e77b49f693d9cfe0d782e93026d8f9261368b69 | [
"MIT"
]
| null | null | null | client.py | juzejunior/HttpBasicServer | 7e77b49f693d9cfe0d782e93026d8f9261368b69 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Simple Http Client, to request html files
Modification: 11/09/2017
Author: J. Júnior
'''
import httplib
import sys
#get http server ip - pass in the command line
http_server = sys.argv[1]
#create a connection with the server
conn = httplib.HTTPConnection(http_server)
while 1:
cmd = raw_input('input command (ex. GET index.html): ')
cmd = cmd.split()
if cmd[0] == 'exit': #type exit to end it
break
#request command to server
conn.request(cmd[0], cmd[1])
#get response from server
rsp = conn.getresponse()
#print server response and data
print(rsp.status, rsp.reason)
print(rsp.getheaders())
data_received = rsp.read()
print(data_received)
#close connection
conn.close()
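#example session (illustrative; 'localhost' stands in for a real server):
#   $ python client.py localhost
#   input command (ex. GET index.html): GET /index.html
#   (200, 'OK')
#   [('content-type', 'text/html'), ...]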
| 22.852941 | 58 | 0.679537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 394 | 0.506427 |
de1d5ad5042762573fde2a3a38799da995504ae1 | 6,881 | py | Python | pyssh/crypto/asymmetric.py | beckjake/pyssh | d6b7a6cca7e38d0835f84386723ec10ac5ad621f | [
"CC0-1.0"
]
| null | null | null | pyssh/crypto/asymmetric.py | beckjake/pyssh | d6b7a6cca7e38d0835f84386723ec10ac5ad621f | [
"CC0-1.0"
]
| null | null | null | pyssh/crypto/asymmetric.py | beckjake/pyssh | d6b7a6cca7e38d0835f84386723ec10ac5ad621f | [
"CC0-1.0"
]
| null | null | null | """Implement asymmetric cryptography.
"""
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, dsa, utils, padding
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.backends import default_backend
from collections import OrderedDict
import io
from builtins import int #pylint: disable=redefined-builtin
from pyssh.constants import ENC_SSH_RSA, ENC_SSH_DSS
from pyssh.base_types import String, MPInt
# pylint:disable=invalid-name
class UnsupportedKeyProtocol(Exception):
"""Key protocol not supported."""
class InvalidAlgorithm(Exception):
"""Mismatched algorithm"""
#TODO: ECDSA (RFC 5656)
class BaseAlgorithm(object):
"""The base algorithm. Has private keys and/or public keys and does
signature creation and/or verification.
"""
FORMAT_STR = None
PUBKEY_CLASS = None
PRIVKEY_CLASS = None
def __init__(self, privkey=None, pubkey=None):
self._privkey = None
self.privkey = privkey
self.pubkey = pubkey
@property
def privkey(self):
"""Getter for the private key."""
return self._privkey
@privkey.setter
def privkey(self, value):
"""When setting the private key, also set the public key to match."""
self._privkey = value
if value:
self.pubkey = value.public_key()
def unpack_pubkey(self, stream):
"""Unpack a public key from a stream."""
raise NotImplementedError('not implemented')
def pack_pubkey(self):
"""Pack a public key into bytes."""
raise NotImplementedError('not implemented')
@classmethod
def _check_keytype(cls, stream):
"""Verify that the keytype from the stream is the expected one."""
keytype = String.unpack_from(stream)
if cls.FORMAT_STR != keytype:
msg = 'Got {!r}, expected {!r}'.format(keytype, cls.FORMAT_STR)
raise InvalidAlgorithm(msg)
def verify_signature(self, signature, data):
"""Verify the signature against the given data. Pubkey must be set."""
raise NotImplementedError('not implemented')
def sign(self, data):
"""Sign some data. Privkey must be set."""
raise NotImplementedError('not implemented')
def read_pubkey(self, data):
"""Read a public key from data in the ssh public key format.
:param bytes data: the data to read.
Sets self.pubkey.
"""
pubkey = serialization.load_ssh_public_key(data, default_backend())
assert isinstance(pubkey.public_numbers(), self.PUBKEY_CLASS)
self.pubkey = pubkey
def read_privkey(self, data, password=None):
"""Read a PEM-encoded private key from data. If a password is set, it
will be used to decode the key.
:param bytes data: the data to read
:param bytes password: The password.
Sets self.privkey.
"""
privkey = serialization.load_pem_private_key(data, password,
default_backend())
assert isinstance(privkey.private_numbers(), self.PRIVKEY_CLASS)
self.privkey = privkey
class RSAAlgorithm(BaseAlgorithm):
"""Support for the RSA algorithm."""
FORMAT_STR = String(ENC_SSH_RSA)
PRIVKEY_CLASS = rsa.RSAPrivateNumbers
PUBKEY_CLASS = rsa.RSAPublicNumbers
def unpack_pubkey(self, stream):
self._check_keytype(stream)
e = MPInt.unpack_from(stream).value
n = MPInt.unpack_from(stream).value
self.pubkey = rsa.RSAPublicNumbers(e, n).public_key(default_backend())
def pack_pubkey(self):
return b''.join([
self.FORMAT_STR.pack(),
MPInt(self.pubkey.public_numbers().e).pack(),
MPInt(self.pubkey.public_numbers().n).pack()
])
def verify_signature(self, signature, data):
stream = io.BytesIO(signature)
self._check_keytype(stream)
blob = String.unpack_from(stream).value
verifier = self.pubkey.verifier(
blob,
padding.PKCS1v15(),
hashes.SHA1()
)
verifier.update(data)
verifier.verify()
def sign(self, data):
signer = self.privkey.signer(
PKCS1v15(),
hashes.SHA1()
)
signer.update(data)
signed = signer.finalize()
return b''.join([
self.FORMAT_STR.pack(),
String(signed).pack()
])
class DSAAlgorithm(BaseAlgorithm):
"""Support for the DSA."""
FORMAT_STR = String(ENC_SSH_DSS)
PRIVKEY_CLASS = dsa.DSAPrivateNumbers
PUBKEY_CLASS = dsa.DSAPublicNumbers
def unpack_pubkey(self, stream):
self._check_keytype(stream)
p = MPInt.unpack_from(stream)
q = MPInt.unpack_from(stream)
g = MPInt.unpack_from(stream)
params = dsa.DSAParameterNumbers(p.value, q.value, g.value)
y = MPInt.unpack_from(stream)
pubnums = dsa.DSAPublicNumbers(y.value, params)
self.pubkey = pubnums.public_key(default_backend())
def pack_pubkey(self):
pubnums = self.pubkey.public_numbers()
return b''.join([
self.FORMAT_STR.pack(),
MPInt(pubnums.parameter_numbers.p).pack(),
MPInt(pubnums.parameter_numbers.q).pack(),
MPInt(pubnums.parameter_numbers.g).pack(),
MPInt(pubnums.y).pack(),
])
def verify_signature(self, signature, data):
stream = io.BytesIO(signature)
self._check_keytype(stream)
blob = String.unpack_from(stream).value
# convert to rfc6979 signature
blob = utils.encode_rfc6979_signature(
r=int.from_bytes(blob[:20], 'big'),
s=int.from_bytes(blob[20:], 'big')
)
verifier = self.pubkey.verifier(
blob,
hashes.SHA1()
)
verifier.update(data)
verifier.verify()
def sign(self, data):
signer = self.privkey.signer(
hashes.SHA1()
)
signer.update(data)
signed = signer.finalize()
r, s = utils.decode_rfc6979_signature(signed)
return b''.join([
self.FORMAT_STR.pack(),
String(int(r).to_bytes(20, 'big') + int(s).to_bytes(20, 'big')).pack(),
])
PUBLIC_KEY_PROTOCOLS = OrderedDict((
(ENC_SSH_RSA, RSAAlgorithm),
(ENC_SSH_DSS, DSAAlgorithm)
))
def get_asymmetric_algorithm(keytype):
"""Get the referenced public key type. If a signature_blob blob is included,
validate it.
"""
try:
handler = PUBLIC_KEY_PROTOCOLS[keytype]
except KeyError:
raise UnsupportedKeyProtocol(keytype)
return handler()
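
if __name__ == '__main__':
    # Hedged, self-contained demo (not part of the original module). It
    # assumes the pre-2.0 `cryptography` signer/verifier API that the
    # classes above already target.
    algo = RSAAlgorithm()
    algo.privkey = rsa.generate_private_key(
        public_exponent=65537, key_size=2048, backend=default_backend())
    signature = algo.sign(b'payload')
    algo.verify_signature(signature, b'payload')  # raises if the data was tampered with
    print('RSA sign/verify round-trip OK')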
| 31.277273 | 83 | 0.636826 | 5,786 | 0.840866 | 0 | 0 | 642 | 0.0933 | 0 | 0 | 1,360 | 0.197646 |
de1dfa963d73dc87e79e92fa3fe653f6462539c8 | 1,230 | py | Python | books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py | haohonglin/DeepLearning-1 | c00eee4738d322f6eb5d61d5bafbcfa7b20152a0 | [
"Apache-2.0"
]
| 1 | 2020-12-01T06:13:21.000Z | 2020-12-01T06:13:21.000Z | books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py | idonashino/DeepLearning | c00eee4738d322f6eb5d61d5bafbcfa7b20152a0 | [
"Apache-2.0"
]
| null | null | null | books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py | idonashino/DeepLearning | c00eee4738d322f6eb5d61d5bafbcfa7b20152a0 | [
"Apache-2.0"
]
| 1 | 2021-01-01T15:28:36.000Z | 2021-01-01T15:28:36.000Z | """
@ jetou
@ naive bayes classifier
@ date 2017 10 31
"""
import numpy as np
class naive_bayes:
def __init__(self, feature, label):
self.feature = feature.transpose()
self.label = label.transpose().flatten(1)
self.positive = np.count_nonzero(self.label == 1) * 1.0
self.negative = np.count_nonzero(self.label == -1) * 1.0
def train(self):
positive_dict = {}
negative_dict = {}
for i in self.feature:
unqiue = set(i)
for j in unqiue:
positive_dict[j] = np.count_nonzero(self.label[i==j]==1) / self.positive
negative_dict[j] = np.count_nonzero(self.label[i==j]==-1) / self.negative
return positive_dict, negative_dict
def prediction(self, pre_feature):
positive_chance = self.positive / self.label.shape[0]
negative_chance = self.negative / self.label.shape[0]
positive_dict, negative_dict = self.train()
for i in pre_feature:
i = str(i)
positive_chance *= positive_dict[i]
negative_chance *= negative_dict[i]
if positive_chance > negative_chance:
return 1
else:
return -1
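
# Hedged usage sketch (not part of the original file): labels are +1/-1 and
# features are categorical values; the exact input shapes depend on the
# transposes in __init__, so treat this as illustrative only.
#     clf = naive_bayes(feature_matrix, label_row_vector)
#     clf.prediction(['sunny', 'cool'])   # -> 1 or -1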
| 28.604651 | 89 | 0.585366 | 1,139 | 0.926016 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.054472 |
de1e40b74da53919bbdc4c6c8dda38d5aba2c247 | 27 | py | Python | src/__init__.py | natrodrigues/face-recognition | 00c78bea55d2738913cf5475056c2faf05fe960e | [
"MIT"
]
| null | null | null | src/__init__.py | natrodrigues/face-recognition | 00c78bea55d2738913cf5475056c2faf05fe960e | [
"MIT"
]
| null | null | null | src/__init__.py | natrodrigues/face-recognition | 00c78bea55d2738913cf5475056c2faf05fe960e | [
"MIT"
]
| null | null | null | from . import frame_manager | 27 | 27 | 0.851852 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
de1e4247762eb410a1475e5659c71d8d5fb3aa3a | 276 | py | Python | sparweltbitool/config.py | checkout-charlie/bitool | e41ce66ab2b88992dbfc08d79372bf3965724f3e | [
"MIT"
]
| null | null | null | sparweltbitool/config.py | checkout-charlie/bitool | e41ce66ab2b88992dbfc08d79372bf3965724f3e | [
"MIT"
]
| null | null | null | sparweltbitool/config.py | checkout-charlie/bitool | e41ce66ab2b88992dbfc08d79372bf3965724f3e | [
"MIT"
]
| 1 | 2015-07-22T16:53:42.000Z | 2015-07-22T16:53:42.000Z | import os
import sys
if sys.version_info[:2] >= (3, 4):
import configparser
config = configparser.ConfigParser()
else:
import ConfigParser
config = ConfigParser.ConfigParser()
config.readfp(open('app/config/config_%s.cfg' % os.environ.get('APP_ENV', 'dev')))
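# Illustrative layout assumed by the loader above (file names are examples):
#   app/config/config_dev.cfg    - used when APP_ENV is unset (default 'dev')
#   app/config/config_prod.cfg   - used when APP_ENV=prod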
| 25.090909 | 82 | 0.706522 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.144928 |
de2067c1459291384093f5c6102e9ab0301ade68 | 3,164 | py | Python | src/rsa_decryption_125/app.py | seanballais/rsa-decryption-125 | df2ad27d055469e7c58a811f40cfc2c8a6171298 | [
"MIT"
]
| null | null | null | src/rsa_decryption_125/app.py | seanballais/rsa-decryption-125 | df2ad27d055469e7c58a811f40cfc2c8a6171298 | [
"MIT"
]
| null | null | null | src/rsa_decryption_125/app.py | seanballais/rsa-decryption-125 | df2ad27d055469e7c58a811f40cfc2c8a6171298 | [
"MIT"
]
| null | null | null | import tkinter
from tkinter import *
from rsa_decryption_125 import decryptor
class AppWindow(Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.init_window()
def init_window(self):
self.master.title('RSA Decryptor')
self.pack(fill=BOTH, expand=1)
self.encrypted_message_label = Label(self, text='Encrypted Message')
self.encrypted_message_label.place(x=0, y=0)
self.encrypted_message_entrybox = Entry(self)
self.encrypted_message_entrybox.place(x=122, y=0, width=300)
self.public_key_label = Label(self, text='Public Key')
self.public_key_label.place(x=0, y=25)
self.n_label = Label(self, text='n =')
self.n_label.place(x=96, y=40)
self.n_entrybox = Entry(self)
self.n_entrybox.place(x=122, y=40, width=300)
self.e_label = Label(self, text='e =')
self.e_label.place(x=96, y=70)
self.e_entrybox = Entry(self)
self.e_entrybox.place(x=122, y=65, width=300)
self.decrypted_message_label = Label(self, text='Decrypted message')
self.decrypted_message_label.place(x=0, y=95)
self.decrypted_message_box = Text(self, width=60, height=12)
box_scroll = Scrollbar(self, command=self.decrypted_message_box.yview)
self.decrypted_message_box.configure(yscrollcommand=box_scroll.set)
self.decrypted_message_box.place(x=0, y=115)
self.decrypt_button = Button(self, text="Decrypt message", command=self.get_decrypted_message)
self.decrypt_button.place(x=0, y=305)
def get_decrypted_message(self):
self.decrypt_button['text'] = 'Decrypting message...'
self.decrypt_button['state'] = 'disabled'
self.encrypted_message_entrybox['state'] = 'disabled'
self.n_entrybox['state'] = 'disabled'
self.e_entrybox['state'] = 'disabled'
encrypted = str(self.encrypted_message_entrybox.get())
n = int(self.n_entrybox.get())
e = int(self.e_entrybox.get())
decrypted = decryptor.decrypt(encrypted, n, e)
self.decrypted_message_box.delete('1.0', END)
try:
self.decrypted_message_box.insert(END, decryptor.decode_message(decrypted))
except ValueError as ve:
tkinter.messagebox.showerror(
'Error!', '{}. Invalid encrypted message or public key.'.format(ve)
)
except Exception as e:
tkinter.messagebox.showerror(
'Something went terribly wrong!', e
)
self.decrypt_button['text'] = 'Decrypt message'
self.decrypt_button['state'] = 'normal'
self.encrypted_message_entrybox['state'] = 'normal'
self.n_entrybox['state'] = 'normal'
self.e_entrybox['state'] = 'normal'
self.decrypted_message_box['state'] = 'normal'
def app_exit(self):
exit()
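
# For context, textbook RSA on toy numbers (illustrative only; the real
# `decryptor` module is imported above but not shown in this file):
#     n, e = 3233, 17                # n = 61 * 53
#     d = pow(e, -1, 60 * 52)        # modular inverse (Python 3.8+) -> 2753
#     c = pow(65, e, n)              # encrypt 65 -> 2790
#     assert pow(c, d, n) == 65      # decrypting recovers the message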
def main():
root = Tk()
root.geometry('430x350')
root.resizable(False, False)
app = AppWindow(root)
root.mainloop()
if __name__ == '__main__':
main() | 34.391304 | 102 | 0.631163 | 2,902 | 0.917193 | 0 | 0 | 0 | 0 | 0 | 0 | 397 | 0.125474 |
de207e25aa9bca185c57928c53cd749f04d47818 | 2,031 | py | Python | model.py | starinsun/multiagent-particle-envs | 23b1c47fad4d71347ba3de7a5e8cec910f08382d | [
"MIT"
]
| null | null | null | model.py | starinsun/multiagent-particle-envs | 23b1c47fad4d71347ba3de7a5e8cec910f08382d | [
"MIT"
]
| null | null | null | model.py | starinsun/multiagent-particle-envs | 23b1c47fad4d71347ba3de7a5e8cec910f08382d | [
"MIT"
]
| null | null | null | import paddle.fluid as fluid
import parl
from parl import layers
class MAModel(parl.Model):
def __init__(self, act_dim):
self.actor_model = ActorModel(act_dim)
self.critic_model = CriticModel()
def policy(self, obs):
return self.actor_model.policy(obs)
def value(self, obs, act):
return self.critic_model.value(obs, act)
def get_actor_params(self):
return self.actor_model.parameters()
def get_critic_params(self):
return self.critic_model.parameters()
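
# Hedged usage sketch (not part of the original file): this model is shaped
# for PARL's MADDPG algorithm; the constructor arguments below are
# illustrative.
#     from parl.algorithms import MADDPG
#     model = MAModel(act_dim=5)
#     alg = MADDPG(model, agent_index=0, act_space=env.action_space,
#                  gamma=0.95, tau=0.01, actor_lr=0.01, critic_lr=0.01)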
class ActorModel(parl.Model):
def __init__(self, act_dim):
hid1_size = 64
hid2_size = 64
self.fc1 = layers.fc(
size=hid1_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc2 = layers.fc(
size=hid2_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc3 = layers.fc(
size=act_dim,
act=None,
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
def policy(self, obs):
hid1 = self.fc1(obs)
hid2 = self.fc2(hid1)
means = self.fc3(hid2)
return means
class CriticModel(parl.Model):
def __init__(self):
hid1_size = 64
hid2_size = 64
self.fc1 = layers.fc(
size=hid1_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc2 = layers.fc(
size=hid2_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc3 = layers.fc(
size=1,
act=None,
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
def value(self, obs_n, act_n):
inputs = layers.concat(obs_n + act_n, axis=1)
hid1 = self.fc1(inputs)
hid2 = self.fc2(hid1)
Q = self.fc3(hid2)
Q = layers.squeeze(Q, axes=[1])
return Q | 27.445946 | 68 | 0.573609 | 1,958 | 0.964057 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.011817 |
de20802d519423344cda6384cb09a94946775ee1 | 724 | py | Python | src/fmWidgets/FmColorEdit.py | ComputerArchitectureGroupPWr/Floorplan-Maker | 8f2922cdab16501d3bb00f93c3130d3f2c593698 | [
"MIT"
]
| null | null | null | src/fmWidgets/FmColorEdit.py | ComputerArchitectureGroupPWr/Floorplan-Maker | 8f2922cdab16501d3bb00f93c3130d3f2c593698 | [
"MIT"
]
| null | null | null | src/fmWidgets/FmColorEdit.py | ComputerArchitectureGroupPWr/Floorplan-Maker | 8f2922cdab16501d3bb00f93c3130d3f2c593698 | [
"MIT"
]
| null | null | null | from PyQt4.QtGui import QPalette, QColor
__author__ = 'pawel'
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
class FmColorEdit(QtGui.QLineEdit):
def __init__(self, parent):
super(FmColorEdit, self).__init__(parent)
        self.setReadOnly(True)
        self.color = QColor(Qt.blue) # default so currentColor() is safe before any click
def mousePressEvent(self, event):
        color = QtGui.QColorDialog.getColor(Qt.blue)
        if not color.isValid(): # user cancelled the dialog; keep the previous colour
            return
        self.color = color
        palette = self.palette()
        palette.setColor(QPalette.Base, self.color)
        self.setPalette(palette)
def currentColor(self):
return self.color.name()
def setColor(self, color):
self.color = color
palette = self.palette()
palette.setColor(QPalette.Base, QColor(color))
self.setPalette(palette) | 25.857143 | 57 | 0.672652 | 606 | 0.837017 | 0 | 0 | 0 | 0 | 0 | 0 | 7 | 0.009669 |
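
# Hedged usage sketch (illustrative; not part of the original widget):
#     app = QtGui.QApplication([])
#     edit = FmColorEdit(None)
#     edit.show()                  # click the field to open the colour dialog
#     app.exec_()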
de269b1d0a4fe87a69767fba8b3e00ccf68b4d65 | 6,543 | py | Python | admin.py | ericholscher/pypi | 4c7c13bd2061d99bbf11a803ac7a7afe3740e365 | [
"BSD-3-Clause"
]
| 1 | 2015-11-08T11:31:07.000Z | 2015-11-08T11:31:07.000Z | admin.py | ericholscher/pypi | 4c7c13bd2061d99bbf11a803ac7a7afe3740e365 | [
"BSD-3-Clause"
]
| null | null | null | admin.py | ericholscher/pypi | 4c7c13bd2061d99bbf11a803ac7a7afe3740e365 | [
"BSD-3-Clause"
]
| null | null | null |
import sys, os, urllib, StringIO, traceback, cgi, binascii, getopt, shutil
import zipfile, gzip, tarfile
#sys.path.append('/usr/local/pypi/lib')
import store, config
def set_password(store, name, pw):
""" Reset the user's password and send an email to the address given.
"""
user = store.get_user(name.strip())
if user is None:
raise ValueError, 'user name unknown to me'
store.store_user(user['name'], pw.strip(), user['email'], None)
print 'done'
def remove_package(store, name):
''' Remove a package from the database
'''
store.remove_package(name)
print 'done'
def add_owner(store, package, owner):
user = store.get_user(owner)
if user is None:
raise ValueError, 'user name unknown to me'
if not store.has_package(package):
raise ValueError, 'no such package'
store.add_role(owner, 'Owner', package)
def delete_owner(store, package, owner):
user = store.get_user(owner)
if user is None:
raise ValueError, 'user name unknown to me'
if not store.has_package(package):
raise ValueError, 'no such package'
for role in store.get_package_roles(package):
if role['role_name']=='Owner' and role['user_name']==owner:
break
else:
raise ValueError, "user is not currently owner"
store.delete_role(owner, 'Owner', package)
def add_classifier(st, classifier):
''' Add a classifier to the trove_classifiers list
'''
cursor = st.get_cursor()
cursor.execute("select max(id) from trove_classifiers")
id = cursor.fetchone()[0]
if id:
id = int(id) + 1
else:
id = 1
fields = [f.strip() for f in classifier.split('::')]
for f in fields:
assert ':' not in f
levels = []
for l in range(2, len(fields)):
c2 = ' :: '.join(fields[:l])
store.safe_execute(cursor, 'select id from trove_classifiers where classifier=%s', (c2,))
l = cursor.fetchone()
if not l:
raise ValueError, c2 + " is not a known classifier"
levels.append(l[0])
levels += [id] + [0]*(3-len(levels))
store.safe_execute(cursor, 'insert into trove_classifiers (id, classifier, l2, l3, l4, l5) '
'values (%s,%s,%s,%s,%s,%s)', [id, classifier]+levels)
def rename_package(store, old, new):
''' Rename a package. '''
if not store.has_package(old):
raise ValueError, 'no such package'
if store.has_package(new):
raise ValueError, new+' exists'
store.rename_package(old, new)
print "Please give www-data permissions to all files of", new
def add_mirror(store, root, user):
''' Add a mirror to the mirrors list
'''
store.add_mirror(root, user)
print 'done'
def delete_mirror(store, root):
''' Delete a mirror
'''
store.delete_mirror(root)
print 'done'
def delete_old_docs(config, store):
'''Delete documentation directories for packages that have been deleted'''
for i in os.listdir(config.database_docs_dir):
if not store.has_package(i):
path = os.path.join(config.database_docs_dir, i)
print "Deleting", path
shutil.rmtree(path)
def merge_user(store, old, new):
c = store.get_cursor()
if not store.get_user(old):
print "Old does not exist"
raise SystemExit
if not store.get_user(new):
print "New does not exist"
raise SystemExit
c.execute('update openids set name=%s where name=%s', (new, old))
c.execute('update sshkeys set name=%s where name=%s', (new, old))
c.execute('update roles set user_name=%s where user_name=%s', (new, old))
c.execute('delete from rego_otk where name=%s', (old,))
c.execute('update journals set submitted_by=%s where submitted_by=%s', (new, old))
c.execute('update mirrors set user_name=%s where user_name=%s', (new, old))
c.execute('update comments set user_name=%s where user_name=%s', (new, old))
c.execute('update ratings set user_name=%s where user_name=%s', (new, old))
c.execute('update comments_journal set submitted_by=%s where submitted_by=%s', (new, old))
c.execute('delete from users where name=%s', (old,))
def nuke_nested_lists(store):
c = store.get_cursor()
c.execute("""select name, version, summary from releases
where summary like '%nested lists%'""")
hits = {}
for name, version, summary in c.fetchall():
for f in store.list_files(name, version):
path = store.gen_file_path(f['python_version'], name, f['filename'])
if path.endswith('.zip'):
z = zipfile.ZipFile(path)
for i in z.infolist():
if not i.filename.endswith('.py'): continue
if 'def print_lol' in z.read(i.filename):
hits[name] = summary
elif path.endswith('.tar.gz'):
z = gzip.GzipFile(path)
t = tarfile.TarFile(fileobj=z)
for i in t.getmembers():
if not i.name.endswith('.py'): continue
f = t.extractfile(i.name)
if 'def print_lol' in f.read():
hits[name] = summary
for name in hits:
store.remove_package(name)
print '%s: %s' % (name, hits[name])
print 'removed %d packages' % len(hits)
if __name__ == '__main__':
config = config.Config('/data/pypi/config.ini')
st = store.Store(config)
st.open()
command = sys.argv[1]
args = (st, ) + tuple(sys.argv[2:])
try:
if command == 'password':
set_password(*args)
elif command == 'rmpackage':
remove_package(*args)
elif command == 'addclass':
add_classifier(*args)
print 'done'
elif command == 'addowner':
add_owner(*args)
elif command == 'delowner':
delete_owner(*args)
elif command == 'rename':
rename_package(*args)
elif command == 'addmirror':
add_mirror(*args)
elif command == 'delmirror':
delete_mirror(*args)
elif command == 'delolddocs':
delete_old_docs(config, *args)
        elif command == 'send_comments':
            # send_comments() is not defined anywhere in this module; fail
            # gracefully instead of raising a NameError.
            print "the 'send_comments' command is no longer supported"
elif command == 'mergeuser':
merge_user(*args)
elif command == 'nuke_nested_lists':
nuke_nested_lists(*args)
else:
print "unknown command '%s'!"%command
st.changed()
finally:
st.close()
| 35.367568 | 97 | 0.599419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,850 | 0.282745 |
de26d7fc8c223d9eef08edc2aa50933adc8cafe1 | 1,777 | py | Python | scripts/geodata/address_expansions/equivalence.py | Fillr/libpostal | bce153188aff9fbe65aef12c3c639d8069e707fc | [
"MIT"
]
| 3,489 | 2015-03-03T00:21:38.000Z | 2022-03-29T09:03:05.000Z | scripts/geodata/address_expansions/equivalence.py | StephenHildebrand/libpostal | d8c9847c5686a1b66056e65128e1774f060ff36f | [
"MIT"
]
| 488 | 2015-05-29T23:04:28.000Z | 2022-03-29T11:20:24.000Z | scripts/geodata/address_expansions/equivalence.py | StephenHildebrand/libpostal | d8c9847c5686a1b66056e65128e1774f060ff36f | [
"MIT"
]
| 419 | 2015-11-24T16:53:07.000Z | 2022-03-27T06:51:28.000Z | import random
import re
import six
from itertools import izip
from geodata.address_expansions.gazetteers import *
from geodata.encoding import safe_decode, safe_encode
from geodata.text.normalize import normalized_tokens
from geodata.text.tokenize import tokenize_raw, token_types
from geodata.text.utils import non_breaking_dash_regex
def canonicals_for_language(data, language):
canonicals = set()
for d in data:
lang, dictionary, is_canonical, canonical = d.split(six.b('|'))
if language is None or lang == language:
canonicals.add(canonical)
return canonicals
def equivalent(s1, s2, gazetteer, language):
'''
Address/place equivalence
-------------------------
OSM discourages abbreviations, but to make our training data map better
to real-world input, we can safely replace the canonical phrase with an
abbreviated version and retain the meaning of the words
'''
tokens_s1 = normalized_tokens(s1)
tokens_s2 = normalized_tokens(s2)
    # use the gazetteer argument rather than the module-level default
    abbreviated_s1 = list(gazetteer.filter(tokens_s1))
    abbreviated_s2 = list(gazetteer.filter(tokens_s2))
if len(abbreviated_s1) != len(abbreviated_s2):
return False
for ((t1, c1, l1, d1), (t2, c2, l2, d2)) in izip(abbreviated_s1, abbreviated_s2):
if c1 != token_types.PHRASE and c2 != token_types.PHRASE:
if t1 != t2:
return False
        elif c1 == token_types.PHRASE and c2 == token_types.PHRASE:
canonicals_s1 = canonicals_for_language(d1, language)
canonicals_s2 = canonicals_for_language(d2, language)
if not canonicals_s1 & canonicals_s2:
return False
else:
return False
return True
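
# Hedged illustration (not in the original file), assuming an English
# street-type gazetteer is loaded:
#     equivalent(u'Main Street', u'Main St', abbreviations_gazetteer, 'en')
#     # -> True, because both phrases map to the same canonical 'street'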
| 31.175439 | 85 | 0.68655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 287 | 0.161508 |
de27afb959d2cb13e74aaad06b80a65da178a7e6 | 170 | py | Python | Language Skills/Python/Unit 08 Loops/01 Loops/Step Up 'For's/While Loops/3-While You're at it.py | rhyep/Python_tutorials | f5c8a64b91802b005dfe7dd9035f8d8daae8c3e3 | [
"MIT"
]
| 346 | 2016-02-22T20:21:10.000Z | 2022-01-27T20:55:53.000Z | Language Skills/Python/Unit 8/1-Loops/While Loops/3-While You're at it.py | vpstudios/Codecademy-Exercise-Answers | ebd0ee8197a8001465636f52c69592ea6745aa0c | [
"MIT"
]
| 55 | 2016-04-07T13:58:44.000Z | 2020-06-25T12:20:24.000Z | Language Skills/Python/Unit 8/1-Loops/While Loops/3-While You're at it.py | vpstudios/Codecademy-Exercise-Answers | ebd0ee8197a8001465636f52c69592ea6745aa0c | [
"MIT"
]
| 477 | 2016-02-21T06:17:02.000Z | 2021-12-22T10:08:01.000Z | num = 1
while num <= 10: # Fill in the condition
x = num ** 2# Print num squared
num = num + 1# Increment num (make sure to do this!)
print x
print num
| 21.25 | 56 | 0.594118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 81 | 0.476471 |
de2838f69cfe04090e0142bb22b24b01a4243cd5 | 948 | py | Python | setup.py | povilasb/udptest | 3d16d2e6509e008b37775e7784af54b6edb6633e | [
"MIT"
]
| 2 | 2017-11-17T09:10:41.000Z | 2019-09-20T21:50:08.000Z | setup.py | povilasb/udptest | 3d16d2e6509e008b37775e7784af54b6edb6633e | [
"MIT"
]
| null | null | null | setup.py | povilasb/udptest | 3d16d2e6509e008b37775e7784af54b6edb6633e | [
"MIT"
]
| null | null | null | from setuptools import setup, find_packages
def requirements() -> list:
return [
'click==6.7',
'curio==0.8',
]
setup(
name='udptest',
version='0.1.0',
description='UDP benchmarking/testing tool.',
long_description=open('README.rst').read(),
url='https://github.com/povilasb/httpmeter',
author='Povilas Balciunas',
author_email='[email protected]',
license='MIT',
    packages=find_packages(exclude=('tests',)), # note the comma: a bare string would be matched per-character
entry_points={
'console_scripts': [
'udptestd = udptest.server:main',
'udptest = udptest.client:main',
]
},
classifiers=[
'Programming Language :: Python :: 3.6',
'Operating System :: POSIX :: Linux',
'Natural Language :: English',
'Development Status :: 3 - Alpha',
'Topic :: System :: Networking',
'Topic :: Internet :: UDP',
],
install_requires=requirements(),
)
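
# After `pip install .`, the console_scripts above expose two commands
# (illustrative invocations; options depend on udptest.server / udptest.client):
#   $ udptestd   # start the UDP benchmark server
#   $ udptest    # run the client side against it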
| 25.621622 | 49 | 0.582278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 451 | 0.475738 |
de28f51f7fb4db9f4c4cfed3b53384caa7188918 | 3,200 | py | Python | ssanchors/utilities.py | IoSR-Surrey/source-separation-anchors | c2c73312bdc7f08f37c088fa3986168813f13799 | [
"MIT"
]
| 4 | 2018-07-06T14:35:29.000Z | 2019-08-28T17:13:11.000Z | ssanchors/utilities.py | nd1511/source-separation-anchors | c2c73312bdc7f08f37c088fa3986168813f13799 | [
"MIT"
]
| 1 | 2018-06-18T17:08:28.000Z | 2018-06-19T10:45:58.000Z | ssanchors/utilities.py | nd1511/source-separation-anchors | c2c73312bdc7f08f37c088fa3986168813f13799 | [
"MIT"
]
| 1 | 2018-11-05T19:56:17.000Z | 2018-11-05T19:56:17.000Z | from __future__ import division
import numpy as np
from untwist import data
from untwist import transforms
def target_accompaniment(target, others, sample_rate=None):
"""
Given a target source and list of 'other' sources, this function returns
the target and accompaniment as untwist.data.audio.Wave objects. The
accompaniment is defined as the sum of the other sources.
Parameters
----------
target : np.ndarray or Wave, shape=(num_samples, num_channels)
The true target source.
others : List or single np.ndarray or Wave object
Each object should have the shape=(num_samples, num_channels)
If a single array is given, this should correspond to the
accompaniment.
sample_rate : int, optional
Only needed if Wave objects not provided.
Returns
-------
target : Wave, shape=(num_samples, num_channels)
accompaniment : Wave, shape=(num_samples, num_channels)
"""
if isinstance(others, list):
if not isinstance(others[0], data.audio.Wave):
others = [data.audio.Wave(_, sample_rate) for _ in others]
accompaniment = sum(other for other in others)
else:
if not isinstance(others, data.audio.Wave):
others = data.audio.Wave(others, sample_rate)
accompaniment = others
if not isinstance(target, data.audio.Wave):
target = data.audio.Wave(target, sample_rate)
return target, accompaniment
def stft_istft(num_points=2048, window='hann'):
"""
Returns an STFT and an ISTFT Processor object, both configured with the
same window and transform length. These objects are to be used as follows:
>>> stft, istft = stft_istft()
>>> x = untwist.data.audio.Wave.tone() # Or some Wave
>>> y = stft.process(x)
>>> x = istft.process(y)
Parameters
----------
num_points : int
The number of points to use for the window and the fft transform.
window : str
The type of window to use.
Returns
-------
stft : untwist.transforms.stft.STFT
An STFT processor.
itft : untwist.transforms.stft.ITFT
An ISTFT processor.
"""
stft = transforms.STFT(window, num_points, num_points // 2)
istft = transforms.ISTFT(window, num_points, num_points // 2)
return stft, istft
def ensure_audio_doesnt_clip(list_of_arrays):
"""
Takes a list of arrays and scales them by the same factor such that
none clip.
Parameters
----------
list_of_arrays : list
A list of array_like objects
Returns
-------
new_list_of_arrays : list
A list of scaled array_like objects.
"""
    max_peak = 0  # largest absolute sample seen; only attenuate when it reaches 1
for audio in list_of_arrays:
audio_peak = np.max(np.abs(audio))
if audio_peak > max_peak:
max_peak = audio_peak
if max_peak >= 1:
print('Warning: Audio has been attenuated to prevent clipping')
gain = 0.999 / max_peak
new_list_of_arrays = []
for audio in list_of_arrays:
new_list_of_arrays.append(audio * gain)
else:
new_list_of_arrays = list_of_arrays
return new_list_of_arrays
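
# Worked example (illustrative): the array peaking at 2.0 drives the common
# gain of 0.999 / 2.0, so the relative levels of the two signals survive.
#     import numpy as np
#     a, b = np.array([0.5, -2.0]), np.array([0.25, 0.1])
#     a2, b2 = ensure_audio_doesnt_clip([a, b])
#     assert abs(a2).max() <= 0.999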
| 25.806452 | 78 | 0.64625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,819 | 0.568438 |
de296667231d2bd75b621d94c889fd2ea3b5afb5 | 812 | py | Python | bids_events/Events.py | InstitutoDOr/bids_events | c00d76e1f62e5b647f94609acbc9e173a356aef7 | [
"MIT"
]
| null | null | null | bids_events/Events.py | InstitutoDOr/bids_events | c00d76e1f62e5b647f94609acbc9e173a356aef7 | [
"MIT"
]
| null | null | null | bids_events/Events.py | InstitutoDOr/bids_events | c00d76e1f62e5b647f94609acbc9e173a356aef7 | [
"MIT"
]
| null | null | null | import os
import re
class EventHandler:
def __init__(self, fname, suffix = '_events'):
# Removing extension and suffix (if present)
fname = re.sub( r'\.tsv$', '', fname )
fname = re.sub( suffix + '$', '', fname )
self.__filename = '{}{}.tsv'.format(fname, suffix)
self.trials = [] # First line should be the header
def get_filename(self):
return self.__filename
    def set_trials(self, trials):
        # Resolves the old TODO: each trial must itself be a sequence,
        # otherwise the join in export_bids() would fail.
        if not all(isinstance(trial, (list, tuple)) for trial in trials):
            raise TypeError('trials must be a list of lists/tuples')
        self.trials = trials
def export_bids(self):
# Preparing tsv lines
output = ''
for line in self.trials:
output += "\t".join([str(i) for i in line]) + "\n"
# Saving output
with open(self.__filename, 'w') as f:
f.write(output) | 31.230769 | 62 | 0.571429 | 792 | 0.975369 | 0 | 0 | 0 | 0 | 0 | 0 | 213 | 0.262315 |
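# Hedged usage sketch (column names and values are illustrative):
#     handler = EventHandler('sub-01_task-rest')
#     handler.set_trials([['onset', 'duration', 'trial_type'],
#                         [0.0, 2.5, 'go']])
#     handler.export_bids()   # writes sub-01_task-rest_events.tsv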
de2bfdafb52bf7f86a472b4af4f49451d709be07 | 87 | py | Python | tests/fixtures/abcd_package/test_a.py | venmo/nose-randomly | 39db5db71a226ffdb6572d5785638e0a16379cfb | [
"BSD-3-Clause"
]
| 19 | 2015-07-30T17:27:56.000Z | 2021-08-10T07:19:43.000Z | tests/fixtures/abcd_package/test_a.py | venmo/nose-randomly | 39db5db71a226ffdb6572d5785638e0a16379cfb | [
"BSD-3-Clause"
]
| 11 | 2016-02-14T10:33:44.000Z | 2016-10-28T12:38:35.000Z | tests/fixtures/abcd_package/test_a.py | adamchainz/nose-randomly | 8a3fbeaf7cc5452c44da8c7e7573fe89391c8260 | [
"BSD-3-Clause"
]
| 4 | 2016-06-01T06:04:46.000Z | 2016-10-26T11:41:53.000Z | from unittest import TestCase
class A(TestCase):
def test_it(self):
pass
| 12.428571 | 29 | 0.666667 | 54 | 0.62069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
de2d96eb9081272f5172b90d540db88b204c04b4 | 427 | py | Python | Python_Challenge_115/6/F.py | LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
]
| 28 | 2019-10-15T13:15:26.000Z | 2021-11-08T08:23:45.000Z | Python_Challenge_115/6/F.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
]
| null | null | null | Python_Challenge_115/6/F.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
]
| 17 | 2019-09-09T00:15:36.000Z | 2021-01-28T13:08:51.000Z | '''
Statement
Fibonacci numbers are the numbers in the integer sequence starting with 1, 1 where every number after the first two is the sum of the two preceding ones:
1, 1, 2, 3, 5, 8, 13, 21, 34, ...
Given a positive integer n, print the nth Fibonacci number.
Example input
6
Example output
8
'''
num = int(input())
before, curr, i = 0, 1, 1
while num > i:
before, curr = curr, curr + before
i += 1
print(curr)
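# Trace for the sample input 6 (for reference): (before, curr) walks
# (0, 1) -> (1, 1) -> (1, 2) -> (2, 3) -> (3, 5) -> (5, 8), so 8 is printed.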
| 18.565217 | 153 | 0.676815 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 302 | 0.70726 |
de2edc2bbe1eee14e878fa5bd6b3104c3a6af8ad | 144 | py | Python | test/test_sum_up.py | marco-a-wagner/nirvana | 325756ec5f208994767b4909ed217ce716f5fcfb | [
"CC0-1.0"
]
| null | null | null | test/test_sum_up.py | marco-a-wagner/nirvana | 325756ec5f208994767b4909ed217ce716f5fcfb | [
"CC0-1.0"
]
| null | null | null | test/test_sum_up.py | marco-a-wagner/nirvana | 325756ec5f208994767b4909ed217ce716f5fcfb | [
"CC0-1.0"
]
| null | null | null | from src.sum_up import *
def test_sum_up():
x = 1
y = 2
assert sum_up(x,y) == 3
def test_sum_up3():
assert sum_up3(1,2,3) == 6 | 16 | 30 | 0.583333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
de2ffb901bbfbc3af2061583ab91b8842066be1f | 1,376 | py | Python | cluster.py | YektaDmrc/UW_GEMSEC | b9e0c995e34f098fdb607fa35a3fe47663839086 | [
"MIT"
]
| 1 | 2018-07-10T23:37:47.000Z | 2018-07-10T23:37:47.000Z | cluster.py | YektaDmrc/UW_GEMSEC | b9e0c995e34f098fdb607fa35a3fe47663839086 | [
"MIT"
]
| null | null | null | cluster.py | YektaDmrc/UW_GEMSEC | b9e0c995e34f098fdb607fa35a3fe47663839086 | [
"MIT"
]
| null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 15:38:11 2018
@author: Yekta
"""
import csv
import numpy as np
from sklearn.cluster import KMeans
clon = list(csv.reader(open("C:/Users/Yekta/Desktop/stajvol3/MoS2BP Binding Characterization_07-11-17_DY.csv")))
for k in range(1,15):
fin=[]
for m in range(1,13):
dataFromCSV = list(csv.reader(open("C:/Users/Yekta/Desktop/stajvol3/573x96/recon/location"+str(m)+"/PCA"+str(k)+".csv")))
dataFromCSV=np.asarray(dataFromCSV)
dataFromCSV=dataFromCSV.T
temp=dataFromCSV[1:,1:]
        temp = temp.astype(np.float64) # np.float was an alias of float and was removed in NumPy >= 1.24
#clusters according to properties
kmeans = KMeans(n_clusters = 3, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(temp)
fin.append(y_kmeans)
fin=np.asarray(fin)
fin=fin.T
matrix = [[0 for x in range(13)] for y in range(97)]
matrix[0][0]="Index"
for z in range(1,97):
matrix[z][0]=clon[z+1][11]
for x in range(1,13):
matrix[0][x]=x
for y in range(1,97):
matrix[y][x]=fin[y-1,x-1]
matrix=np.asarray(matrix)
with open("C:/Users/Yekta/Desktop/stajvol3/573x96/cluster/clusteredPCA"+str(k)+".csv", 'w', newline='') as myfile:
wr = csv.writer(myfile)
wr.writerows(matrix) | 32.761905 | 130 | 0.588663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.260901 |
de319a3d0a027f8b448c09d0528c44c359822d8e | 1,440 | py | Python | test_collision/test_discretedynamicsworld.py | Klumhru/boost-python-bullet | d9ffae09157280f60cb469d8c9c9fa4c1920e3ce | [
"MIT"
]
| 2 | 2015-09-16T15:24:39.000Z | 2015-11-18T11:53:51.000Z | test_collision/test_discretedynamicsworld.py | Klumhru/boost-python-bullet | d9ffae09157280f60cb469d8c9c9fa4c1920e3ce | [
"MIT"
]
| 1 | 2018-04-04T15:33:20.000Z | 2018-04-04T15:33:20.000Z | test_collision/test_discretedynamicsworld.py | Klumhru/boost-python-bullet | d9ffae09157280f60cb469d8c9c9fa4c1920e3ce | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_collision.test_discretedynamicsworld
"""
from __future__ import unicode_literals, print_function, absolute_import
import unittest
import bullet
from .test_worlds import WorldTestDataMixin
class DiscreteDynamicsWorldTestCase(WorldTestDataMixin,
unittest.TestCase):
def setUp(self):
super(DiscreteDynamicsWorldTestCase, self).setUp()
self.world = bullet.btDiscreteDynamicsWorld(
self.dispatcher,
self.broadphase,
self.solver,
self.collision_config
)
def test_ctor(self):
pass
def test_step(self):
for i in range(120):
self.world.step_simulation(self.time_step)
def test_sync_states(self):
for i in range(120):
self.world.step_simulation(self.time_step)
self.world.synchronize_motion_states()
def test_gravity(self):
self.world.set_gravity(self.gravity)
self.assertEquals(self.world.gravity, self.gravity)
self.world.gravity = bullet.btVector3(0, 0, 0)
self.assertEquals(self.world.get_gravity(),
bullet.btVector3(0, 0, 0))
self.assertEquals(self.world.gravity,
bullet.btVector3(0, 0, 0))
def tearDown(self):
del self.world
super(DiscreteDynamicsWorldTestCase, self).tearDown()
| 28.8 | 72 | 0.634028 | 1,191 | 0.827083 | 0 | 0 | 0 | 0 | 0 | 0 | 93 | 0.064583 |
de31e808778594864eecf61a23f3d4e16b0f2a4b | 820 | py | Python | force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py | force-h2020/force-wfmanager | bcd488cd37092cacd9d0c81b544ee8c1654d1d92 | [
"BSD-2-Clause"
]
| 1 | 2019-08-19T16:02:20.000Z | 2019-08-19T16:02:20.000Z | force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py | force-h2020/force-wfmanager | bcd488cd37092cacd9d0c81b544ee8c1654d1d92 | [
"BSD-2-Clause"
]
| 396 | 2017-07-18T15:19:55.000Z | 2021-05-03T06:23:06.000Z | force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py | force-h2020/force-wfmanager | bcd488cd37092cacd9d0c81b544ee8c1654d1d92 | [
"BSD-2-Clause"
]
| 2 | 2019-03-05T16:23:10.000Z | 2020-04-16T08:59:11.000Z | # (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import unittest
from force_wfmanager.notifications.ui_notification_hooks_manager \
import \
UINotificationHooksManager
from force_wfmanager.notifications.ui_notification_plugin import \
UINotificationPlugin
class TestUINotificationHooksFactory(unittest.TestCase):
def setUp(self):
self.plugin = UINotificationPlugin()
self.factory = self.plugin.ui_hooks_factories[0]
def test_initialization(self):
self.assertEqual(self.factory.plugin_id, self.plugin.id)
self.assertEqual(self.factory.plugin_name, self.plugin.name)
def test_create_ui_hooks_manager(self):
self.assertIsInstance(
self.factory.create_ui_hooks_manager(),
UINotificationHooksManager)
| 31.538462 | 68 | 0.74878 | 517 | 0.630488 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.093902 |
de31ea78bbeb185adcdcced18fcb297d6af4dc71 | 447 | py | Python | phrasebook/middleware.py | DanCatchpole/phrasebook-django | 4f85ec40626cbb97c659448ee06f2291c8f2918b | [
"MIT"
]
| 1 | 2020-11-10T17:31:56.000Z | 2020-11-10T17:31:56.000Z | phrasebook/middleware.py | DanCatchpole/phrasebook-django | 4f85ec40626cbb97c659448ee06f2291c8f2918b | [
"MIT"
]
| null | null | null | phrasebook/middleware.py | DanCatchpole/phrasebook-django | 4f85ec40626cbb97c659448ee06f2291c8f2918b | [
"MIT"
]
| null | null | null | from django.shortcuts import redirect
from .models import UserLanguage
class FirstLoginMiddleware(object):
def process_request(self, request):
if request.user.is_authenticated:
langs = UserLanguage.objects.filter(user=request.user)
            if not langs.exists(): # idiomatic emptiness check
return redirect('phrasebook:first_login')
return self.get_response(request)
def get_response(self, request):
pass
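
# To activate (illustrative; this is old-style middleware, so it belongs in
# the pre-Django-1.10 setting):
#     MIDDLEWARE_CLASSES = [
#         ...,
#         'phrasebook.middleware.FirstLoginMiddleware',
#     ]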
| 27.9375 | 66 | 0.680089 | 372 | 0.832215 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.053691 |
de346180214f310ac4c427bc250a7eb3f75732e4 | 113 | py | Python | PROGATE/PYTHON_I_page07.py | vox256/Codes | c408ef0fbc25af46dacef93b3496985feb98dd5c | [
"MIT"
]
| null | null | null | PROGATE/PYTHON_I_page07.py | vox256/Codes | c408ef0fbc25af46dacef93b3496985feb98dd5c | [
"MIT"
]
| null | null | null | PROGATE/PYTHON_I_page07.py | vox256/Codes | c408ef0fbc25af46dacef93b3496985feb98dd5c | [
"MIT"
]
| null | null | null | money = 2000
print(money)
# Add 5000 to the variable money and overwrite money with the result
money += 5000
# Print the value of the variable money
print(money)
de3486ad1b0724a14e6330a44ee92a956bf5ee2e | 380 | py | Python | quokka/modules/accounts/views.py | yencchen/quokka_epus | d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5 | [
"MIT"
]
| null | null | null | quokka/modules/accounts/views.py | yencchen/quokka_epus | d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5 | [
"MIT"
]
| null | null | null | quokka/modules/accounts/views.py | yencchen/quokka_epus | d64aeb9c5ca59ee4bdcd84381f9bb0504680f5f5 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import redirect, request, url_for
from flask.views import MethodView
from flask.ext.security import current_user
class SwatchView(MethodView):
"""
change the bootswatch theme
"""
def post(self):
current_user.set_swatch(request.form.get('swatch'))
return redirect(url_for('admin.index'))
| 22.352941 | 59 | 0.692105 | 206 | 0.542105 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.284211 |
de34fea664d85474bd07e69ca7917ce3402fb32e | 142 | py | Python | nolina/__init__.py | JohnReid/nolina | 23894517ac60d27d167447871ef85a4a78cad630 | [
"MIT"
]
| null | null | null | nolina/__init__.py | JohnReid/nolina | 23894517ac60d27d167447871ef85a4a78cad630 | [
"MIT"
]
| null | null | null | nolina/__init__.py | JohnReid/nolina | 23894517ac60d27d167447871ef85a4a78cad630 | [
"MIT"
]
| null | null | null | """Randomised linear algebra."""
import numpy.linalg as la
def normalise(v):
norm = la.norm(v)
return v if 0 == norm else v / norm
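
# Quick check (illustrative):
#     import numpy as np
#     normalise(np.array([3.0, 4.0]))   # -> array([0.6, 0.8])
#     normalise(np.zeros(2))            # a zero vector is returned unchanged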
| 15.777778 | 39 | 0.640845 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.225352 |
de35289eea69e5ceb7febfc7fa32b43c5609a79c | 887 | py | Python | src/commands/reload.py | zaanposni/umfrageBot | 3e19dc0629cde394da2ae8706e6e043b4e87059d | [
"MIT"
]
| 6 | 2019-08-15T20:19:38.000Z | 2021-02-28T21:33:19.000Z | src/commands/reload.py | zaanposni/umfrageBot | 3e19dc0629cde394da2ae8706e6e043b4e87059d | [
"MIT"
]
| 31 | 2019-08-14T08:42:08.000Z | 2020-05-07T13:43:43.000Z | src/commands/reload.py | zaanposni/umfrageBot | 3e19dc0629cde394da2ae8706e6e043b4e87059d | [
"MIT"
]
| 5 | 2019-08-17T13:39:53.000Z | 2020-04-01T07:25:51.000Z | from bt_utils.console import Console
from bt_utils.config import cfg
from bt_utils.embed_templates import SuccessEmbed, WarningEmbed
from bt_utils.handle_sqlite import DatabaseHandler
SHL = Console('BundestagsBot Reload')
DB = DatabaseHandler()
settings = {
'name': 'reload',
'channels': ['team'],
'mod_cmd': True
}
async def main(client, message, params):
files_failed = cfg.reload(debug=True)
if files_failed == 0:
embed = SuccessEmbed('Success', 'All files reloaded')
else:
embed = WarningEmbed('Reloading failed', f'Failed to reload {files_failed} file(s)')
roles = cfg.options["roles_stats"].values()
# creates basic table structures if not already present
DB.create_structure(roles)
# updates table structure, e.g. if a new role has been added
DB.update_columns(roles)
await message.channel.send(embed=embed)
| 27.71875 | 92 | 0.713641 | 0 | 0 | 0 | 0 | 0 | 0 | 554 | 0.624577 | 278 | 0.313416 |
de3555aacf51f612d0e7cb4e5d614fc7db59f6c9 | 4,022 | py | Python | scanner.py | Darchiv/scambus | 0a81a67b76a5ec5117d56a4c05c4392696eb3f06 | [
"MIT"
]
| 22 | 2015-08-21T11:58:20.000Z | 2021-12-28T04:50:05.000Z | scanner.py | Darchiv/scambus | 0a81a67b76a5ec5117d56a4c05c4392696eb3f06 | [
"MIT"
]
| 5 | 2017-02-26T14:22:53.000Z | 2021-02-11T00:47:48.000Z | scanner.py | Darchiv/scambus | 0a81a67b76a5ec5117d56a4c05c4392696eb3f06 | [
"MIT"
]
| 14 | 2015-04-13T08:02:18.000Z | 2021-12-16T14:08:54.000Z | #! /usr/bin/env python2.7
import getopt, sys, time, util
from wmbus import WMBusFrame
from Crypto.Cipher import AES
def main(argv):
samplefile = ''
interface = '/dev/ttyUSB3'
usagetext = 'scanner.py -hv -i <interface>'
verbosity = 0
# setup known keys dictionarry by their device id
keys = {
'\x57\x00\x00\x44': '\xCA\xFE\xBA\xBE\x12\x34\x56\x78\x9A\xBC\xDE\xF0\xCA\xFE\xBA\xBE',
'\x00\x00\x00\x00': '\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF'
}
try:
opts, args = getopt.getopt(argv,"v:hi:",["interface="])
except getopt.GetoptError:
print usagetext
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print usagetext
sys.exit()
else:
if opt in ("-i", "--interface"):
interface = arg
if opt == "-v":
verbosity = 1
if arg == 'v':
verbosity = 2
if arg == 'vv':
verbosity = 3
while 1:
# setup values
arr = bytearray()
state = 0
frame_length = -1
# connect sniffer device
ser = util.connect_sniffer(interface)
# sleep for a while in case there is no data available
while ser.inWaiting() == 0:
time.sleep(2)
# data arrived, go and get it
while ser.inWaiting() > 0:
if (state == 0):
'''
let's get the leading two bytes from the serial stream and
check whether they match hex FF 03. Do this until we reach
the next FF 03 start sequence
TODO:
- How is the trailing byte checksum calculated?
'''
arr.append(ser.read(1))
if (arr[0] == 0xFF):
# found 0xFF, let's see whether the following byte is 0x03
arr.append(ser.read(1))
if (arr[0] == 0xFF and len(arr) == 2 and arr[1] == 0x03):
# just hit a valid start sequence => enter next state
state = 1
else:
'''
just hit an invalid start sequence. let's drop the bytes
and start over
'''
arr = bytearray()
elif (state == 1):
# let's read the frame length from the next byte
arr.append(ser.read(1))
                frame_length = arr[2] - 1
state = 2
elif (state == 2):
'''
in case the payload length is greater than zero bytes, read
frame_length bytes from the serial stream
'''
if (len(arr)-3 < frame_length):
for i in range(frame_length):
arr.append(ser.read(1))
if (verbosity >= 3):
# print the whole wireless M-Bus frame in hex
print util.tohex(arr)
# instantiate an wireless m-bus frame based on the data
frame = WMBusFrame()
frame.parse(arr[2:], keys)
# print wM-Bus frame information as log line
frame.log(verbosity)
# clear array and go to detect the next start sequence
arr = bytearray()
state = 0
if __name__ == "__main__":
main(sys.argv[1:])
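
# Frame layout assumed by the state machine above (bytes are illustrative):
#     FF 03 | LEN | (LEN - 1) payload bytes
# e.g. FF 03 04 AA BB CC -- start sequence, length byte 4, three payload
# bytes; the length byte and payload together go to WMBusFrame.parse().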
'''
class Scanner(threading.Thread):
    def __init__(self, dev):
        #something here that initializes the serial port
def run():
while True:
def pack(self):
#something
def checksum(self):
#something
def write(self):
#something
'''
| 31.421875 | 92 | 0.458478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,759 | 0.437345 |
de35b41f521bfe20dfbbf60f134cdbe2d7425715 | 2,080 | py | Python | pyy1/.pycharm_helpers/python_stubs/-1550516950/gi/_gi/BaseInfo.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
]
| null | null | null | pyy1/.pycharm_helpers/python_stubs/-1550516950/gi/_gi/BaseInfo.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
]
| null | null | null | pyy1/.pycharm_helpers/python_stubs/-1550516950/gi/_gi/BaseInfo.py | pyy1988/pyy_test1 | 6bea878409e658aa87441384419be51aaab061e7 | [
"Apache-2.0"
]
| null | null | null | # encoding: utf-8
# module gi._gi
# from /usr/lib/python3/dist-packages/gi/_gi.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
# no doc
# imports
import _gobject as _gobject # <module '_gobject'>
import _glib as _glib # <module '_glib'>
import gi as __gi
import gobject as __gobject
from .object import object
class BaseInfo(object):
# no doc
def equal(self, *args, **kwargs): # real signature unknown
pass
def get_attribute(self, *args, **kwargs): # real signature unknown
pass
def get_container(self, *args, **kwargs): # real signature unknown
pass
def get_name(self, *args, **kwargs): # real signature unknown
pass
def get_namespace(self, *args, **kwargs): # real signature unknown
pass
def get_name_unescaped(self, *args, **kwargs): # real signature unknown
pass
def get_type(self, *args, **kwargs): # real signature unknown
pass
def is_deprecated(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
__hash__ = None
__name__ = 'BaseInfo'
| 25.679012 | 76 | 0.610577 | 1,758 | 0.845192 | 0 | 0 | 0 | 0 | 0 | 0 | 831 | 0.399519 |
de3618687057494d918d8f6f783dfd78edbb7ce5 | 828 | py | Python | setup.py | ntamas/python-selecta | bc9a11f288df427ceb126aa994ac3810685e2d94 | [
"MIT"
]
| 1 | 2019-02-21T14:47:40.000Z | 2019-02-21T14:47:40.000Z | setup.py | ntamas/python-selecta | bc9a11f288df427ceb126aa994ac3810685e2d94 | [
"MIT"
]
| 2 | 2015-07-11T03:32:35.000Z | 2015-08-26T09:29:40.000Z | setup.py | ntamas/python-selecta | bc9a11f288df427ceb126aa994ac3810685e2d94 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from selecta import __version__
from setuptools import setup
options = dict(
name='python-selecta',
version=__version__,
url='http://github.com/ntamas/python-selecta',
description='Python port of @garybernhardt/selecta',
license='MIT',
author='Tamas Nepusz',
author_email='[email protected]',
package_dir={'selecta': 'selecta'},
packages=['selecta'],
entry_points={
"console_scripts": [
'selecta = selecta.__main__:main'
]
},
test_suite="tests",
platforms='ALL',
classifiers=[
# TODO
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python'
]
)
setup(**options)
| 20.195122 | 56 | 0.607488 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 405 | 0.48913 |
de37ff05a0046e06ac61cbc292e777a426c175fb | 525 | py | Python | graphsaint/setup.py | alexs131/GraphSAINT | 20ac0dce1bdad0505b98ab117aaca84d1aa0bcd8 | [
"MIT"
]
| null | null | null | graphsaint/setup.py | alexs131/GraphSAINT | 20ac0dce1bdad0505b98ab117aaca84d1aa0bcd8 | [
"MIT"
]
| null | null | null | graphsaint/setup.py | alexs131/GraphSAINT | 20ac0dce1bdad0505b98ab117aaca84d1aa0bcd8 | [
"MIT"
]
| null | null | null | # cython: language_level=3
from distutils.core import setup, Extension
from Cython.Build import cythonize
import numpy
# import cython_utils
import os
os.environ["CC"] = "/opt/homebrew/Cellar/gcc/11.2.0_3/bin/g++-11"
os.environ["CXX"] = "/opt/homebrew/Cellar/gcc/11.2.0_3/bin/g++-11"
setup(ext_modules=cythonize(["graphsaint/cython_sampler.pyx", "graphsaint/cython_utils.pyx",
"graphsaint/norm_aggr.pyx"]), include_dirs=[numpy.get_include()])
# to compile: python graphsaint/setup.py build_ext --inplace
| 37.5 | 93 | 0.737143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.565714 |
de3854551e9e60f025c395d03bedb3f5b3cb6f38 | 4,958 | py | Python | models/get_networks.py | kingqiuol/pytorch-template | 8bc78f996fbbc15ae54a3055cd3d33199b4a96d8 | [
"MIT"
]
| null | null | null | models/get_networks.py | kingqiuol/pytorch-template | 8bc78f996fbbc15ae54a3055cd3d33199b4a96d8 | [
"MIT"
]
| null | null | null | models/get_networks.py | kingqiuol/pytorch-template | 8bc78f996fbbc15ae54a3055cd3d33199b4a96d8 | [
"MIT"
]
| null | null | null | import sys
def get_network(args):
""" return given network
"""
if args.MODEL.NAME == 'vgg16':
from models.vgg import vgg16_bn
net = vgg16_bn()
elif args.MODEL.NAME == 'vgg13':
from models.vgg import vgg13_bn
net = vgg13_bn()
elif args.MODEL.NAME == 'vgg11':
from models.vgg import vgg11_bn
net = vgg11_bn()
elif args.MODEL.NAME == 'vgg19':
from models.vgg import vgg19_bn
net = vgg19_bn()
elif args.MODEL.NAME == 'densenet121':
from models.densenet import densenet121
net = densenet121()
elif args.MODEL.NAME == 'densenet161':
from models.densenet import densenet161
net = densenet161()
elif args.MODEL.NAME == 'densenet169':
from models.densenet import densenet169
net = densenet169()
elif args.MODEL.NAME == 'densenet201':
from models.densenet import densenet201
net = densenet201()
elif args.MODEL.NAME == 'googlenet':
from models.googlenet import googlenet
net = googlenet()
elif args.MODEL.NAME == 'inceptionv3':
from models.inceptionv3 import inceptionv3
net = inceptionv3()
elif args.MODEL.NAME == 'inceptionv4':
from models.inceptionv4 import inceptionv4
net = inceptionv4()
elif args.MODEL.NAME == 'inceptionresnetv2':
from models.inceptionv4 import inception_resnet_v2
net = inception_resnet_v2()
elif args.MODEL.NAME == 'xception':
from models.xception import xception
net = xception()
elif args.MODEL.NAME == 'resnet18':
from models.resnet import resnet18
net = resnet18()
elif args.MODEL.NAME == 'resnet34':
from models.resnet import resnet34
net = resnet34()
elif args.MODEL.NAME == 'resnet50':
from models.resnet import resnet50
net = resnet50()
elif args.MODEL.NAME == 'resnet101':
from models.resnet import resnet101
net = resnet101()
elif args.MODEL.NAME == 'resnet152':
from models.resnet import resnet152
net = resnet152()
elif args.MODEL.NAME == 'preactresnet18':
from models.preactresnet import preactresnet18
net = preactresnet18()
elif args.MODEL.NAME == 'preactresnet34':
from models.preactresnet import preactresnet34
net = preactresnet34()
elif args.MODEL.NAME == 'preactresnet50':
from models.preactresnet import preactresnet50
net = preactresnet50()
elif args.MODEL.NAME == 'preactresnet101':
from models.preactresnet import preactresnet101
net = preactresnet101()
elif args.MODEL.NAME == 'preactresnet152':
from models.preactresnet import preactresnet152
net = preactresnet152()
elif args.MODEL.NAME == 'resnext50':
from models.resnext import resnext50
net = resnext50()
elif args.MODEL.NAME == 'resnext101':
from models.resnext import resnext101
net = resnext101()
elif args.MODEL.NAME == 'resnext152':
from models.resnext import resnext152
net = resnext152()
elif args.MODEL.NAME == 'shufflenet':
from models.shufflenet import shufflenet
net = shufflenet()
elif args.MODEL.NAME == 'shufflenetv2':
from models.shufflenetv2 import shufflenetv2
net = shufflenetv2()
elif args.MODEL.NAME == 'squeezenet':
from models.squeezenet import squeezenet
net = squeezenet()
elif args.MODEL.NAME == 'mobilenet':
from models.mobilenet import mobilenet
net = mobilenet()
elif args.MODEL.NAME == 'mobilenetv2':
from models.mobilenetv2 import mobilenetv2
net = mobilenetv2()
elif args.MODEL.NAME == 'nasnet':
from models.nasnet import nasnet
net = nasnet()
elif args.MODEL.NAME == 'attention56':
from models.attention import attention56
net = attention56()
elif args.MODEL.NAME == 'attention92':
from models.attention import attention92
net = attention92()
elif args.MODEL.NAME == 'seresnet18':
from models.senet import seresnet18
net = seresnet18()
elif args.MODEL.NAME == 'seresnet34':
from models.senet import seresnet34
net = seresnet34()
elif args.MODEL.NAME == 'seresnet50':
from models.senet import seresnet50
net = seresnet50()
elif args.MODEL.NAME == 'seresnet101':
from models.senet import seresnet101
net = seresnet101()
elif args.MODEL.NAME == 'seresnet152':
from models.senet import seresnet152
net = seresnet152()
elif args.MODEL.NAME == 'wideresnet':
from models.wideresidual import wideresnet
net = wideresnet()
elif args.MODEL.NAME == 'stochasticdepth18':
from models.stochasticdepth import stochastic_depth_resnet18
net = stochastic_depth_resnet18()
elif args.MODEL.NAME == 'stochasticdepth34':
from models.stochasticdepth import stochastic_depth_resnet34
net = stochastic_depth_resnet34()
elif args.MODEL.NAME == 'stochasticdepth50':
from models.stochasticdepth import stochastic_depth_resnet50
net = stochastic_depth_resnet50()
elif args.MODEL.NAME == 'stochasticdepth101':
from models.stochasticdepth import stochastic_depth_resnet101
net = stochastic_depth_resnet101()
elif args.MODEL.NAME == 'vit':
from models.vit import vit
        net = vit()
else:
print('the network name you have entered is not supported yet')
sys.exit()
if args.MODEL.USE_GPU: # use_gpu
net = net.cuda()
return net
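
# Hedged usage sketch (not part of the original file): `args` mirrors a
# YACS-style config exposing MODEL.NAME and MODEL.USE_GPU, as read above.
#     from yacs.config import CfgNode
#     args = CfgNode({'MODEL': {'NAME': 'resnet18', 'USE_GPU': False}})
#     net = get_network(args)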
| 32.834437 | 65 | 0.740621 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 661 | 0.13332 |
de38b348a7c3f728ca43e602a33e53edfd8f033d | 10,812 | py | Python | tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py | hwwhww/trinity | 614b083a637c665f84b1af228541f37c25d9c665 | [
"MIT"
]
| 2 | 2020-01-30T21:51:00.000Z | 2020-07-22T14:51:05.000Z | tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py | hwwhww/trinity | 614b083a637c665f84b1af228541f37c25d9c665 | [
"MIT"
]
| null | null | null | tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py | hwwhww/trinity | 614b083a637c665f84b1af228541f37c25d9c665 | [
"MIT"
]
| null | null | null | import pytest
from hypothesis import (
given,
settings,
strategies as st,
)
from eth_utils import (
ValidationError,
)
from eth.constants import (
ZERO_HASH32,
)
from eth2.beacon.committee_helpers import (
get_crosslink_committees_at_slot,
)
from eth2.beacon.state_machines.forks.serenity.block_validation import (
validate_attestation_aggregate_signature,
validate_attestation_latest_crosslink_root,
validate_attestation_justified_block_root,
validate_attestation_justified_epoch,
validate_attestation_crosslink_data_root,
validate_attestation_slot,
)
from eth2.beacon.tools.builder.validator import (
create_mock_signed_attestation,
)
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.crosslink_records import CrosslinkRecord
@pytest.mark.parametrize(
('genesis_slot', 'genesis_epoch', 'slots_per_epoch', 'min_attestation_inclusion_delay'),
[
(8, 2, 4, 2),
]
)
@pytest.mark.parametrize(
(
'attestation_slot,'
'state_slot,'
'is_valid,'
),
[
# in bounds at lower end
(8, 2 + 8, True),
# in bounds at high end
(8, 8 + 4, True),
# attestation_slot < genesis_slot
(7, 2 + 8, False),
# state_slot > attestation_data.slot + slots_per_epoch
(8, 8 + 4 + 1, False),
# attestation_data.slot + min_attestation_inclusion_delay > state_slot
(8, 8 - 2, False),
]
)
def test_validate_attestation_slot(sample_attestation_data_params,
attestation_slot,
state_slot,
slots_per_epoch,
genesis_slot,
genesis_epoch,
min_attestation_inclusion_delay,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
slot=attestation_slot,
)
if is_valid:
validate_attestation_slot(
attestation_data,
state_slot,
slots_per_epoch,
min_attestation_inclusion_delay,
genesis_slot,
)
else:
with pytest.raises(ValidationError):
validate_attestation_slot(
attestation_data,
state_slot,
slots_per_epoch,
min_attestation_inclusion_delay,
genesis_slot,
)
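# The validity window exercised above (reconstructed from the parametrized
# cases):
#   genesis_slot <= attestation.slot, and
#   attestation.slot + min_attestation_inclusion_delay <= state_slot
#                                   <= attestation.slot + slots_per_epoch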
@pytest.mark.parametrize(
(
'attestation_slot,'
'attestation_justified_epoch,'
'current_epoch,'
'previous_justified_epoch,'
'justified_epoch,'
'slots_per_epoch,'
'is_valid,'
),
[
# slot_to_epoch(attestation_data.slot + 1, slots_per_epoch) >= current_epoch
(23, 2, 3, 1, 2, 8, True), # attestation_data.justified_epoch == justified_epoch
(23, 1, 3, 1, 2, 8, False), # attestation_data.justified_epoch != justified_epoch
# slot_to_epoch(attestation_data.slot + 1, slots_per_epoch) < current_epoch
(22, 1, 3, 1, 2, 8, True), # attestation_data.justified_epoch == previous_justified_epoch
(22, 2, 3, 1, 2, 8, False), # attestation_data.justified_epoch != previous_justified_epoch
]
)
def test_validate_attestation_justified_epoch(
sample_attestation_data_params,
attestation_slot,
attestation_justified_epoch,
current_epoch,
previous_justified_epoch,
justified_epoch,
slots_per_epoch,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
slot=attestation_slot,
justified_epoch=attestation_justified_epoch,
)
if is_valid:
validate_attestation_justified_epoch(
attestation_data,
current_epoch,
previous_justified_epoch,
justified_epoch,
slots_per_epoch,
)
else:
with pytest.raises(ValidationError):
validate_attestation_justified_epoch(
attestation_data,
current_epoch,
previous_justified_epoch,
justified_epoch,
slots_per_epoch,
)
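# Rule exercised above: attestations from the current epoch (i.e.
# slot_to_epoch(slot + 1, slots_per_epoch) >= current_epoch) must cite
# state.justified_epoch, while older attestations must cite
# state.previous_justified_epoch.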
@pytest.mark.parametrize(
(
'attestation_justified_block_root,'
'justified_block_root,'
'is_valid,'
),
[
(b'\x33' * 32, b'\x22' * 32, False), # attestation.justified_block_root != justified_block_root # noqa: E501
(b'\x33' * 32, b'\x33' * 32, True),
]
)
def test_validate_attestation_justified_block_root(sample_attestation_data_params,
attestation_justified_block_root,
justified_block_root,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
justified_block_root=attestation_justified_block_root,
)
if is_valid:
validate_attestation_justified_block_root(
attestation_data,
justified_block_root
)
else:
with pytest.raises(ValidationError):
validate_attestation_justified_block_root(
attestation_data,
justified_block_root
)
@pytest.mark.parametrize(
(
'attestation_latest_crosslink,'
'attestation_crosslink_data_root,'
'state_latest_crosslink,'
'is_valid,'
),
[
(
CrosslinkRecord(0, b'\x11' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x22' * 32),
False,
),
(
CrosslinkRecord(0, b'\x33' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x11' * 32),
False,
),
(
CrosslinkRecord(0, b'\x11' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x33' * 32),
True,
),
(
CrosslinkRecord(0, b'\x33' * 32),
b'\x22' * 32,
CrosslinkRecord(0, b'\x33' * 32),
True,
),
(
CrosslinkRecord(0, b'\x33' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x33' * 32),
True,
),
]
)
def test_validate_attestation_latest_crosslink(sample_attestation_data_params,
attestation_latest_crosslink,
attestation_crosslink_data_root,
state_latest_crosslink,
slots_per_epoch,
is_valid):
sample_attestation_data_params['latest_crosslink'] = attestation_latest_crosslink
sample_attestation_data_params['crosslink_data_root'] = attestation_crosslink_data_root
attestation_data = AttestationData(**sample_attestation_data_params).copy(
latest_crosslink=attestation_latest_crosslink,
crosslink_data_root=attestation_crosslink_data_root,
)
if is_valid:
validate_attestation_latest_crosslink_root(
attestation_data,
state_latest_crosslink,
slots_per_epoch=slots_per_epoch,
)
else:
with pytest.raises(ValidationError):
validate_attestation_latest_crosslink_root(
attestation_data,
state_latest_crosslink,
slots_per_epoch=slots_per_epoch,
)
@pytest.mark.parametrize(
(
'attestation_crosslink_data_root,'
'is_valid,'
),
[
(ZERO_HASH32, True),
(b'\x22' * 32, False),
(b'\x11' * 32, False),
]
)
def test_validate_attestation_crosslink_data_root(sample_attestation_data_params,
attestation_crosslink_data_root,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
crosslink_data_root=attestation_crosslink_data_root,
)
if is_valid:
validate_attestation_crosslink_data_root(
attestation_data,
)
else:
with pytest.raises(ValidationError):
validate_attestation_crosslink_data_root(
attestation_data,
)
@settings(max_examples=1)
@given(random=st.randoms())
@pytest.mark.parametrize(
(
'num_validators,'
'slots_per_epoch,'
'target_committee_size,'
'shard_count,'
'is_valid,'
'genesis_slot'
),
[
(10, 2, 2, 2, True, 0),
(40, 4, 3, 5, True, 0),
(20, 5, 3, 2, True, 0),
(20, 5, 3, 2, False, 0),
],
)
def test_validate_attestation_aggregate_signature(genesis_state,
slots_per_epoch,
random,
sample_attestation_data_params,
is_valid,
target_committee_size,
shard_count,
keymap,
committee_config):
state = genesis_state
# choose committee
slot = 0
crosslink_committee = get_crosslink_committees_at_slot(
state=state,
slot=slot,
committee_config=committee_config,
)[0]
committee, shard = crosslink_committee
committee_size = len(committee)
assert committee_size > 0
    # use votes from 3/4 of the committee
votes_count = len(committee) * 3 // 4
assert votes_count > 0
attestation_data = AttestationData(**sample_attestation_data_params).copy(
slot=slot,
shard=shard,
)
attestation = create_mock_signed_attestation(
state,
attestation_data,
committee,
votes_count,
keymap,
slots_per_epoch,
)
if is_valid:
validate_attestation_aggregate_signature(
state,
attestation,
committee_config,
)
else:
# mess up signature
attestation = attestation.copy(
aggregate_signature=(
attestation.aggregate_signature[0] + 10,
attestation.aggregate_signature[1] - 1
)
)
with pytest.raises(ValidationError):
validate_attestation_aggregate_signature(
state,
attestation,
committee_config,
)
| 31.068966 | 117 | 0.561321 | 0 | 0 | 0 | 0 | 9,976 | 0.922679 | 0 | 0 | 1,502 | 0.13892 |
de3966c1044750e98c8968c82831f55e24112044 | 13,679 | py | Python | SeqtaSDSBridge.py | jacobcurulli/SeqtaSDSBridge | 19b8da95462d1e0aa8a059c9f8075d8f7ce1b417 | [
"CC-BY-4.0"
]
| null | null | null | SeqtaSDSBridge.py | jacobcurulli/SeqtaSDSBridge | 19b8da95462d1e0aa8a059c9f8075d8f7ce1b417 | [
"CC-BY-4.0"
]
| 1 | 2021-05-21T04:52:28.000Z | 2021-05-21T05:00:10.000Z | SeqtaSDSBridge.py | jacobcurulli/SeqtaSDSBridge | 19b8da95462d1e0aa8a059c9f8075d8f7ce1b417 | [
"CC-BY-4.0"
]
| 1 | 2021-04-07T13:50:43.000Z | 2021-04-07T13:50:43.000Z | ###########################################################################################################
###########################################################################################################
## SeqtaToSDS ##
## Jacob Curulli ##
## This code is shared as is, under Creative Commons Attribution Non-Commercial 4.0 License ##
## Permissions beyond the scope of this license may be available at http://creativecommons.org/ns ##
###########################################################################################################
# Read Me
# This script will likely not work out of the box and will need to be customised
# 1. The approvedClassesCSV is a list of classes in Seqta that will be exported,
# the list is checked against the 'name' column in the public.classunit table.
# 2. A directory called 'sds' will need to be created in the root of where the script is run.
# 3. This script allows for an admin user to be added to every class (section)
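# Illustrative approved_classes.csv layout (one class name per row; the names
# below are examples only and must match public.classunit.name in Seqta):
#   10MAT01
#   10SCI02
#   11ENG03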
# import required modules
# psycopg2 isn't usually included with python and may need to be installed separately
# see www.psycopg.org for instructions
import psycopg2
import csv
import os.path
import configparser
from datetime import datetime
# Get the date
dateNow = datetime.now()
# Read the config.ini file
config = configparser.ConfigParser()
config.read('config.ini')
# read config file for seqta database connection details
db_user = config['db']['user']
db_port = config['db']['port']
db_password = config['db']['password']
db_database = config['db']['database']
db_host = config['db']['host']
db_sslmode = config['db']['sslmode']
# read config file for school details
teamsAdminUsername = config['school']['teamsAdminUsername']
teamsAdminFirstName = config['school']['teamsAdminFirstName']
teamsAdminLastName = config['school']['teamsAdminLastName']
teamsAdminID = config['school']['teamsAdminID']
schoolName = config['school']['schoolName']
schoolSISId = config['school']['schoolSISId']
classTermName = config['school']['classTermName']
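# For reference, a config.ini matching the reads above would look like this
# (the section and key names are the ones actually read; the values are
# placeholders):
#   [db]
#   user = seqta_reader
#   port = 5432
#   password = secret
#   database = seqta
#   host = db.example.school
#   sslmode = require
#
#   [school]
#   teamsAdminUsername = teams.admin@example.school
#   teamsAdminFirstName = Teams
#   teamsAdminLastName = Admin
#   teamsAdminID = 999999
#   schoolName = Example School
#   schoolSISId = 10001
#   classTermName = Term 1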
# declare some variables here so we can make sure they are present
staffList = set()
studentList = set()
classArray = tuple()
currentYear = dateNow.strftime("%Y")
print("current year is:", currentYear)
# file locations, this can be changed to suit your environment
csvApprovedClasses = "approved_classes.csv"
csvSchoolFilename = "sds/School.csv"
csvSectionFileName = "sds/Section.csv"
csvStudentFileName = "sds/Student.csv"
csvTeacherFileName = "sds/Teacher.csv"
csvTeacherRosterFileName = "sds/TeacherRoster.csv"
csvStudentEnrollmentFileName = "sds/StudentEnrollment.csv"
# remove the csv files if they already exist so each run starts clean
for csv_file in (csvSchoolFilename, csvSectionFileName, csvStudentFileName,
                 csvTeacherFileName, csvTeacherRosterFileName,
                 csvStudentEnrollmentFileName):
    if os.path.exists(csv_file):
        os.remove(csv_file)
try:
# Import CSV file for approved class lists
with open(csvApprovedClasses, newline='', encoding='utf-8-sig') as csvfile:
classList = list(csv.reader(csvfile))
        print(type(classList))
        print(classList)
        print("Number of classes imported from csv list:", len(classList))
except Exception as e:
    print("***************************")
    print("Error importing csv file:", e)
# Open connection to Seqta
try:
connection = psycopg2.connect(user=db_user,
port=db_port,
password=db_password,
database=db_database,
                                  host=db_host,
                                  sslmode=db_sslmode)
cursor = connection.cursor()
print(connection.get_dsn_parameters(), "\n")
except (Exception, psycopg2.Error) as error:
print("Error while connecting to PostgreSQL", error)
# Fetch data for classlists
try:
for i in classList:
        className = ', '.join(map(str, i))
        print("**")
        print(className)
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
# Lookup classID from Class name in Seqta
sq_classUnitQuery = "SELECT * FROM public.classunit WHERE name = (%s);"
cursor.execute(sq_classUnitQuery,(className,))
classUnitPull = cursor.fetchall()
print("Getting class information for:", (className))
for row in classUnitPull:
classUnitID = row[0]
classSubjectID = row[4]
classTermID = row[7]
print("Class unit ID (classUnitID) is:", classUnitID)
print("Class subject ID (classSubjectID) is:", classSubjectID)
print("Class term ID (classTermID) is:", classTermID)
# Check if class has a staff member or students
# If they don't we need to stop processing the class and drop it gracefully
# Get subject description for Class
sq_classSubjectQuery = "SELECT * FROM subject WHERE id = (%s);"
cursor.execute(sq_classSubjectQuery, (classSubjectID,))
classSubjectPull = cursor.fetchall()
for row in classSubjectPull:
classSubjectDescription = row[3]
classSubjectName = row[2]
classTeamName = (className + " - " + classSubjectDescription)
print("Class subject Description (classSubjectDescription) is:", classSubjectDescription)
print("Class team name (classTeamName) is:", classTeamName)
print("Class subject Name (classSubjectName) is:", classSubjectName)
# Get StaffID in this classUnit
sq_staffIDQuery = "SELECT staff from public.classinstance WHERE classunit = (%s) and date <= current_date ORDER BY id DESC LIMIT 1;"
cursor.execute(sq_staffIDQuery, (classUnitID,))
staffID_pre = cursor.fetchone()
if staffID_pre is None:
print("Couldn't find a class today or previously for classunit:", classUnitID)
print("Checking for a class up to 14 days in the future and selecting the closest date to today")
sq_staffIDQuery = "SELECT staff from public.classinstance WHERE classunit = (%s) date = current_date + interval '14 day' ORDER BY id DESC LIMIT 1;"
cursor.execute(sq_staffIDQuery, (classUnitID,))
staffID_pre = cursor.fetchone()
staffID = int(staffID_pre[0])
print("Staff ID is:", (staffID))
# Write to teacher ID list
staffList.add(staffID)
else:
staffID = int(staffID_pre[0])
print("Staff ID is:", (staffID))
# Write to teacher ID list
staffList.add(staffID)
# Get Student ID's for this classUnit
sq_studentIDListQuery = "SELECT student from \"classunitStudent\" WHERE classunit = (%s) and removed is NULL;"
cursor.execute(sq_studentIDListQuery, (classUnitID,))
studentIDArray = tuple([r[0] for r in cursor.fetchall()])
print("List of students in class name:", className)
print(studentIDArray)
for row in studentIDArray:
studentList.add(row)
# Check if the csv section file exists
csvSectionFileExists = os.path.isfile(csvSectionFileName)
# Write to the section csv file
with open(csvSectionFileName, 'a', newline='') as csvSection:
writer = csv.writer(csvSection)
# If the csv doesn't exist already we'll need to put in the headers
if not csvSectionFileExists:
writer.writerow(["SIS ID", "School SIS ID", "Section Name", "Section Number", "Term SIS ID", "Term Name", "Course SIS ID", "Course Name", "Course Description"])
writer.writerow([(classUnitID), (schoolSISId), (classTeamName), (classUnitID), (classTermID), (classTermName), (classUnitID), (classSubjectName), (classSubjectDescription)])
print ("Writing class section row")
# Check if the csv teacher roster file exists
csvTeacherRosterFileExists = os.path.isfile(csvTeacherRosterFileName)
# Write to the teacher roster csv file
with open(csvTeacherRosterFileName, 'a', newline='') as csvTeacherRoster:
writer = csv.writer(csvTeacherRoster)
# If the csv doesn't exist already we'll need to put in the headers
if not csvTeacherRosterFileExists:
writer.writerow(["Section SIS ID", "SIS ID"])
writer.writerow([(classUnitID), (staffID)])
# Also include the Teams Admin account as a teacher
writer.writerow([(classUnitID), (teamsAdminID)])
print("Written staff to roster")
# Check if the csv student enrollment file exists
csvStudentEnrollmentFileNameExists = os.path.isfile(csvStudentEnrollmentFileName)
# Write to the student enrollment csv file
with open(csvStudentEnrollmentFileName, 'a', newline='') as csvStudentEnrollment:
writer = csv.writer(csvStudentEnrollment)
# If the csv doesn't exist already we'll need to put in the headers
if not csvStudentEnrollmentFileNameExists:
writer.writerow(["Section SIS ID", "SIS ID"])
for studentInArray in studentIDArray:
writer.writerow([(classUnitID), (studentInArray)])
except Exception as e:
    print("")
    print("***************************")
    print("Error fetching class list data:", e)
    print("")
# Now we will fetch the staff information
try:
print("Print the staff lists now")
print(staffList)
for staff in staffList:
# Now get the staff information
sq_staffQuery = "SELECT * from public.staff WHERE id = (%s);"
cursor.execute(sq_staffQuery, (staff,))
staffPull = cursor.fetchall()
for row in staffPull:
staffFirstName = row[4]
staffLastName = row[7]
staffUsername = row[21]
print("Staff First Name (staffFirstName) is:", staffFirstName)
print("Staff Last Name (staffLastName) is:", staffLastName)
print("Staff username (staffUsername) is:", staffUsername)
print("Staff ID is (staff) is:", staff)
# Now we write this information to the Teacher.csv file
# Check if the csv teacher file exists
csvTeacherFileNameExists = os.path.isfile(csvTeacherFileName)
# Write to the teacher csv file
with open(csvTeacherFileName, 'a', newline='') as csvTeacher:
writer = csv.writer(csvTeacher)
# If the csv doesn't exist already we'll need to put in the headers
if not csvTeacherFileNameExists:
writer.writerow(["SIS ID", "School SIS ID", "First Name", "Last Name", "Username", "Teacher Number"])
# Also include the Teams Admin user as a teacher
writer.writerow(
[(teamsAdminID), (schoolSISId), (teamsAdminFirstName), (teamsAdminLastName), (teamsAdminUsername),
(teamsAdminID)])
writer.writerow([(staff), (schoolSISId), (staffFirstName), (staffLastName), (staffUsername), (staff)])
except Exception as e:
    print("Something went wrong getting the staff data:", e)
# Now we will fetch the student information
try:
print("Print the student lists now")
print(studentList)
for student in studentList:
# Now get the student information
sq_studentQuery = "SELECT * from student WHERE id = (%s) AND status = 'FULL';"
cursor.execute(sq_studentQuery, (student,))
studentPull = cursor.fetchall()
for row in studentPull:
studentFirstName = row[3]
studentLastName = row[6]
studentUsername = row[47]
print("Student First Name (studentFirstName) is:", studentFirstName)
print("Student Last Name (studentLastName) is:", studentLastName)
print("Student username (studentUsername) is:", studentUsername)
print("Student ID is (student) is:", student)
# Now we write this information to the Student.csv file
# Check if the csv Student file exists
csvStudentFileNameExists = os.path.isfile(csvStudentFileName)
# Write to the student enrollment csv file
with open(csvStudentFileName, 'a', newline='') as csvStudent:
writer = csv.writer(csvStudent)
# If the csv doesn't exist already we'll need to put in the headers
if not csvStudentFileNameExists:
writer.writerow(["SIS ID", "School SIS ID", "First Name", "Last Name", "Username", "Student Number"])
writer.writerow([(student), (schoolSISId), (studentFirstName), (studentLastName), (studentUsername), (student)])
except Exception as e:
    print("Something went wrong getting the student data:", e)
# write the School.csv file
try:
with open('sds/School.csv', 'a', newline='') as csvSchool:
writer = csv.writer(csvSchool)
writer.writerow(["SIS ID","Name"])
writer.writerow([(schoolSISId),(schoolName)])
except Exception as e:
    print("Something went wrong writing the school csv file:", e)
finally:
# closing database connection.
    if connection:
cursor.close()
connection.close()
print("PostgreSQL connection is closed") | 45.445183 | 185 | 0.635865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5,920 | 0.43278 |
de3b514aae1619036f4e6044f0e8e9c86052e8a3 | 457 | py | Python | Chapter 1/imtools.py | ai-distill/PythonVisionProgramming | 15a432b34d4ca43ab0a0bc765dbcaa9bc8de3d8e | [
"Apache-2.0"
]
| null | null | null | Chapter 1/imtools.py | ai-distill/PythonVisionProgramming | 15a432b34d4ca43ab0a0bc765dbcaa9bc8de3d8e | [
"Apache-2.0"
]
| null | null | null | Chapter 1/imtools.py | ai-distill/PythonVisionProgramming | 15a432b34d4ca43ab0a0bc765dbcaa9bc8de3d8e | [
"Apache-2.0"
]
| null | null | null | """
Stores some frequently used image operations.
"""
import os
from PIL import Image
from numpy import *
def get_imlist(path):
"""
    Return the filenames of all JPG images in a directory.
    :param path: directory to search
    :return: list of paths to the .jpg files found
"""
return [os.path.join(path, f) for f in os.listdir(path) if f.endswith('.jpg')]
def imresize(im, sz):
"""
    Resize an image.
    :param im: image data as an array
    :param sz: target size as a (width, height) tuple
    :return: the resized image as an array
"""
pil_im = Image.fromarray(uint8(im))
return array(pil_im.resize(sz))
print(get_imlist('.')) | 16.925926 | 82 | 0.610503 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 268 | 0.486388 |
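# Illustrative use of imresize (assumes an image file 'test.jpg' exists):
# im_small = imresize(array(Image.open('test.jpg')), (128, 128))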
de3ba9c03d6171d2fbdd34396181dfc69aedd8a7 | 5,190 | py | Python | cart/views.py | lbacon17/lb-fitness | 16f78841c834ca0e45317285b6c3b05ad97501f6 | [
"W3C"
]
| null | null | null | cart/views.py | lbacon17/lb-fitness | 16f78841c834ca0e45317285b6c3b05ad97501f6 | [
"W3C"
]
| null | null | null | cart/views.py | lbacon17/lb-fitness | 16f78841c834ca0e45317285b6c3b05ad97501f6 | [
"W3C"
]
| 1 | 2021-03-31T10:55:51.000Z | 2021-03-31T10:55:51.000Z | from django.shortcuts import (
render, redirect, reverse, get_object_or_404, HttpResponse)
from django.contrib import messages
from shop.models import Product
from members.models import Member
def load_cart(request):
"""This view render's the user's cart contents"""
return render(request, 'cart/cart.html')
def add_item_to_cart(request, item_id):
"""This view lets the user add an item to their shopping cart"""
item = get_object_or_404(Product, pk=item_id)
quantity = int(request.POST.get('quantity'))
redirect_url = request.POST.get('redirect_url')
size = None
if 'item_size' in request.POST:
size = request.POST['item_size']
cart = request.session.get('cart', {})
if size:
if item_id in list(cart.keys()):
if size in cart[item_id]['items_by_size'].keys():
cart[item_id]['items_by_size'][size] += quantity
messages.success(request, f'Updated size {size.upper()} '
f'of {item.friendly_name} to '
f'{cart[item_id]["items_by_size"][size]}')
else:
cart[item_id]['items_by_size'][size] = quantity
messages.success(request, f'Added {quantity}x '
f'{item.friendly_name} in {size.upper()}')
else:
cart[item_id] = {'items_by_size': {size: quantity}}
messages.success(request, f'Added {quantity}x {item.friendly_name}'
f' in size {size.upper()}')
else:
if item_id in list(cart.keys()):
cart[item_id] += quantity
messages.success(request, f'Added {quantity}x {item.friendly_name}'
f' to your cart. You now have {cart[item_id]} of'
f' {item.friendly_name} in your cart')
else:
cart[item_id] = quantity
messages.success(request, f'{cart[item_id]}x {item.friendly_name} '
f'was added to your cart')
request.session['cart'] = cart
return redirect(redirect_url)
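# For reference, the session cart built above has this shape (illustrative
# item ids):
#   {'12': 3}                                   # unsized item: id -> quantity
#   {'7': {'items_by_size': {'m': 2, 'l': 1}}}  # sized item: id -> per-size counts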
def update_cart(request, item_id):
"""This view lets the user update the quantity of an item in their cart"""
item = get_object_or_404(Product, pk=item_id)
quantity = int(request.POST.get('quantity'))
size = None
if 'item_size' in request.POST:
size = request.POST['item_size']
cart = request.session.get('cart', {})
if size:
if quantity > 99:
messages.error(request, 'You cannot add this many units of a '
'product. The maximum possible quantity is 99. '
'Please enter a quantity within the accepted '
'range.')
elif quantity > 0:
cart[item_id]['items_by_size'][size] = quantity
messages.success(request, f'Updated quantity of '
f'{item.friendly_name} in size {size.upper()} '
f'to to {cart[item_id]["items_by_size"][size]}.')
else:
del cart[item_id]['items_by_size'][size]
if not cart[item_id]['items_by_size']:
cart.pop(item_id)
messages.success(request, f'Removed {item.friendly_name} in size '
f'{size.upper()} from your cart.')
else:
if quantity > 99:
messages.error(request, 'You cannot add this many units of a '
'product. The maximum possible quantity is 99. '
'Please enter a quantity within the accepted '
'range.')
elif quantity > 0:
cart[item_id] = quantity
messages.success(request, f'Successfully updated quantity of '
f'{item.friendly_name} to {cart[item_id]}.')
else:
cart.pop(item_id)
messages.success(request, f'{item.friendly_name} was removed from '
'your cart.')
request.session['cart'] = cart
return redirect(reverse('load_cart'))
def remove_item_from_cart(request, item_id):
"""This view lets the user delete an item from their shopping cart"""
try:
item = get_object_or_404(Product, pk=item_id)
size = None
if 'item_size' in request.POST:
size = request.POST['item_size']
cart = request.session.get('cart', {})
if size:
del cart[item_id]['items_by_size'][size]
if not cart[item_id]['items_by_size']:
cart.pop(item_id)
messages.success(request, f'Removed {item.friendly_name} in size '
f'{size.upper()} from your cart.')
else:
cart.pop(item_id)
messages.success(request, f'{item.friendly_name} was deleted from '
'your cart.')
request.session['cart'] = cart
return HttpResponse(status=200)
except Exception as e:
        messages.error(request, f'There was a problem removing the item: {e}')
return HttpResponse(status=500)
| 41.854839 | 79 | 0.559152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,755 | 0.33815 |
de3d6c63aa40e3dc9ff43cbc7c4deca001d8d40e | 172 | py | Python | runserver.py | revalo/hush.mit.edu | e47c28c934dcfb94c52f6e12367869389e8ed7a8 | [
"MIT"
]
| 21 | 2017-10-30T20:55:48.000Z | 2021-09-03T14:06:58.000Z | runserver.py | revalo/hush.mit.edu | e47c28c934dcfb94c52f6e12367869389e8ed7a8 | [
"MIT"
]
| 1 | 2021-11-08T02:05:34.000Z | 2021-11-08T06:54:41.000Z | runserver.py | revalo/hush.mit.edu | e47c28c934dcfb94c52f6e12367869389e8ed7a8 | [
"MIT"
]
| 3 | 2017-11-15T23:18:00.000Z | 2018-01-01T06:44:03.000Z | from confess import app
from confess.config import PORT, DEBUG
if __name__ == '__main__':
app.run(
host='0.0.0.0',
port=PORT,
debug=DEBUG
) | 19.111111 | 38 | 0.593023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.110465 |
de3daa1f9c197f223b8adf05ac9c7b5634367d5c | 5,945 | py | Python | bin/plot_examples/plot_vars_barchart.py | gonzalorodrigo/ScSFWorkload | 2301dacf486df8ed783c0ba33cbbde6e9978c17e | [
"BSD-3-Clause-LBNL"
]
| 1 | 2019-03-18T18:27:49.000Z | 2019-03-18T18:27:49.000Z | bin/plot_examples/plot_vars_barchart.py | gonzalorodrigo/ScSFWorkload | 2301dacf486df8ed783c0ba33cbbde6e9978c17e | [
"BSD-3-Clause-LBNL"
]
| 1 | 2020-12-17T21:33:15.000Z | 2020-12-17T21:35:41.000Z | bin/plot_examples/plot_vars_barchart.py | gonzalorodrigo/ScSFWorkload | 2301dacf486df8ed783c0ba33cbbde6e9978c17e | [
"BSD-3-Clause-LBNL"
]
| 1 | 2021-01-05T08:23:20.000Z | 2021-01-05T08:23:20.000Z | """ Plots analysis on the workflow variables for experiments with different
workflow types and different percentages of workflow core hours in the workload.
    Results are plotted as bar charts that show how much the variables deviate
    in single and multi relative to aware.
"""
import matplotlib
from orchestration import get_central_db
from orchestration.definition import ExperimentDefinition
from plot import (plot_multi_bars, produce_plot_config, extract_results,
gen_trace_ids_exps, calculate_diffs, get_args, join_rows,
replace)
from stats.trace import ResultTrace
# use a non-interactive backend so the script can run without a display (e.g. remotely)
matplotlib.use('Agg')
base_trace_id_percent, lim = get_args(2459, True)
print("Base Exp", base_trace_id_percent)
print("Using analysis of limited workflows:", lim)
db_obj = get_central_db()
edge_keys= {0: "[0,48] core.h", 48*3600:"(48, 960] core.h",
960*3600:"(960, inf.) core.h"}
trace_id_rows = []
base_exp=170
exp=ExperimentDefinition()
exp.load(db_obj, base_exp)
core_seconds_edges=exp.get_machine().get_core_seconds_edges()
# trace_id_rows = [
# [ 4166, 4167, 4168, 4184, 4185, 4186, 4202, 4203, 4204,
# 4220, 4221, 4222, 4238, 4239, 4240 ],
# [ 4169, 4170, 4171, 4187, 4188, 4189, 4205, 4206, 4207,
# 4223, 4224, 4225, 4241, 4242, 4243 ],
# [ 4172, 4173, 4174, 4190, 4191, 4192, 4208, 4209, 4210,
# 4226, 4227, 4228, 4244, 4245, 4246 ],
# [ 4175, 4176, 4177, 4193, 4194, 4195, 4211, 4212, 4213,
# 4229, 4230, 4231, 4247, 4248, 4249],
# [ 4178, 4179, 4180, 4196, 4197, 4198, 4214, 4215, 4216,
# 4232, 4233, 4234, 4250, 4251, 4252],
# [ 4181, 4182, 4183, 4199, 4200, 4201, 4217, 4218, 4219,
# 4235, 4236, 4237, 4253, 4254, 4255],
# ]
pre_base_trace_id_percent = 2549+18
trace_id_rows= join_rows(
gen_trace_ids_exps(pre_base_trace_id_percent,
inverse=False,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=1),
gen_trace_ids_exps(base_trace_id_percent,
inverse=False,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=5)
)
trace_id_colors=join_rows(
gen_trace_ids_exps(pre_base_trace_id_percent+1,
inverse=False, skip=1,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=1,
group_size=2),
gen_trace_ids_exps(base_trace_id_percent+1,
inverse=False,skip=1,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=5,
group_size=2)
)
print("IDS", trace_id_rows)
trace_id_rows=replace(trace_id_rows,
[2489, 2490, 2491,
2507, 2508, 2509,
2525, 2526, 2527],
[2801, 2802, 2803,
2804, 2805, 2806,
2807, 2808, 2809])
print("IDS", trace_id_rows)
print("COLORS", trace_id_colors)
time_labels = ["", "5%", "", "10%", "", "25%",
"", "50%", "", "75%",
"", "100%"]
manifest_label=["floodP", "longW", "wideL",
"cybers", "sipht", "montage"]
y_limits_dic={"[0,48] core.h": (1, 1000),
"(48, 960] core.h":(1,100),
"(960, inf.) core.h":(1,20)}
target_dir="percent"
grouping_types = [["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"]]
colors, hatches, legend = produce_plot_config(db_obj, trace_id_colors)
#head_file_name="percent"
head_file_name="wf_percent-b{0}".format(base_trace_id_percent)
for (name, result_type) in zip(["Turnaround speedup", "wait time(h.)",
"runtime (h.)", "stretch factor"],
["wf_turnaround", "wf_waittime",
"wf_runtime", "wf_stretch_factor"]):
if lim:
result_type="lim_{0}".format(result_type)
print("Loading: {0}".format(name))
factor=1.0/3600.0
if result_type in ("wf_stretch_factor", "lim_wf_stretch_factor"):
factor=None
edge_plot_results = extract_results(db_obj, trace_id_rows,
result_type, factor=factor,
second_pass=lim)
diffs_results = calculate_diffs(edge_plot_results, base_index=0,
group_count=3, speedup=True)
# for res_row in edge_plot_results:
# print [ x._get("median") for x in res_row]
title="{0}".format(name)
y_limits=(0,4)
print("Plotting figure")
ref_level=1.0
plot_multi_bars(
name=title,
file_name=target_dir+"/{0}-{1}-bars.png".format(head_file_name,
result_type),
title=title,
exp_rows=diffs_results,
y_axis_labels=manifest_label,
x_axis_labels=time_labels,
y_axis_general_label=name,
type_rows=grouping_types,
colors=colors,
hatches=hatches,
y_limits=y_limits,
y_log_scale=False,
legend=legend,
y_tick_count=3,
subtitle="% workflow workload",
ncols=2,
ref_line=ref_level
)
| 36.030303 | 75 | 0.518923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,671 | 0.281077 |
de3df638310dcbe32c189284547dca83d1fe51a7 | 410 | py | Python | devpotato_bot/commands/daily_titles/models/inevitable_title.py | cl0ne/cryptopotato-bot | af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | [
"MIT"
]
| 1 | 2021-05-15T23:41:29.000Z | 2021-05-15T23:41:29.000Z | devpotato_bot/commands/daily_titles/models/inevitable_title.py | cl0ne/cryptopotato-bot | af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | [
"MIT"
]
| 1 | 2022-02-19T20:38:33.000Z | 2022-02-19T23:53:39.000Z | devpotato_bot/commands/daily_titles/models/inevitable_title.py | cl0ne/cryptopotato-bot | af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1 | [
"MIT"
]
| 1 | 2021-05-15T23:42:21.000Z | 2021-05-15T23:42:21.000Z | from __future__ import annotations
from .title import TitleFromGroupChat, Base
class InevitableTitle(TitleFromGroupChat):
__tablename__ = f'{Base.TABLENAME_PREFIX}inevitable_titles'
__group_chat_back_populates__ = 'inevitable_titles'
def __repr__(self):
return ('<InevitableTitle('
f'chat_id={self.chat_id}, '
f'text="{self.text}"'
')>')
| 27.333333 | 63 | 0.660976 | 327 | 0.797561 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.32439 |
de3e64921cbcc4e464aa3d32a70cc4b3179f2705 | 1,034 | py | Python | matplotlib/gas_price_overtime.py | MatveiAleksandrovich/Artificial-Intelligence | d3d6f253e7c2256f6f9d490b077bdb50ca1da229 | [
"MIT"
]
| null | null | null | matplotlib/gas_price_overtime.py | MatveiAleksandrovich/Artificial-Intelligence | d3d6f253e7c2256f6f9d490b077bdb50ca1da229 | [
"MIT"
]
| null | null | null | matplotlib/gas_price_overtime.py | MatveiAleksandrovich/Artificial-Intelligence | d3d6f253e7c2256f6f9d490b077bdb50ca1da229 | [
"MIT"
]
| null | null | null | import requests
import pandas as pd
import matplotlib.pyplot as plt
url_gas_data = 'https://raw.githubusercontent.com/KeithGalli/matplotlib_tutorial/master/gas_prices.csv'
res1 = requests.get(url_gas_data, allow_redirects=True)
with open('gas_prices.csv', 'wb') as file:
file.write(res1.content)
plt.figure(figsize=(12, 5))
gas = pd.read_csv('gas_prices.csv')
plt.title('Gas prices over time (in USD)', fontdict={
'fontweight': 'bold', 'fontsize': 16
})
countries_to_look_at = ['USA', 'Australia', 'South Korea', 'Canada']
for country in gas:
if country in countries_to_look_at:
plt.plot(gas.Year, gas[country], label=country, marker='.')
"""
Other way to pass data:
plt.plot(gas.Year, gas.USA, 'b.-', label='United States')
plt.plot(gas.Year, gas.Canada, 'r.-', label='Canada')
plt.plot(gas.Year, gas['South Korea'], 'g.-', label='South Korea')
plt.plot(gas.Year, gas.Australia, 'y.-', label='Australia')
"""
plt.xticks(gas.Year[::3])
plt.xlabel('Year')
plt.ylabel('US Dollars')
plt.legend()
plt.show()
| 23.5 | 103 | 0.698259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 511 | 0.494197 |
de40955063f239619674a2b5ecbf4dbaa910621e | 2,305 | py | Python | integration_tests/test_surveys.py | ONSdigital/sdx-tester | df193867c0d5e9dbf39790c85c41b07a9efed756 | [
"MIT"
]
| null | null | null | integration_tests/test_surveys.py | ONSdigital/sdx-tester | df193867c0d5e9dbf39790c85c41b07a9efed756 | [
"MIT"
]
| null | null | null | integration_tests/test_surveys.py | ONSdigital/sdx-tester | df193867c0d5e9dbf39790c85c41b07a9efed756 | [
"MIT"
]
| null | null | null | import unittest
import uuid
from app import survey_loader
from app import message_manager
from app.tester import run_survey
class TestSurveys(unittest.TestCase):
@classmethod
def setUpClass(cls):
message_manager.start()
@classmethod
def tearDownClass(cls):
message_manager.stop()
def tearDown(self):
print('-----------------------------------------------------')
def execute(self, survey_dict: dict, receipt: bool, multiple_files: bool, eq_version_3: bool = False):
for key, survey_list in survey_dict.items():
for survey in survey_list:
tx_id = str(uuid.uuid4())
survey['tx_id'] = tx_id
with self.subTest(msg=f'test {key} with tx_id: {tx_id}'):
print('---------------------------------------------------------')
print(f'testing {key} with tx_id: {tx_id}')
result = run_survey(message_manager, survey, eq_version_3)
print(str(result))
self.assertFalse(result.timeout, f'{key} has timed out!')
self.assertIsNone(result.quarantine, f'{key} has been quarantined!')
self.assertIsNotNone(result.dap_message, f'{key} did not post dap message!')
if multiple_files:
self.assertTrue(len(result.files) > 1, f'{key} should have produced multiple files!')
else:
self.assertTrue(len(result.files) == 1, f'{key} should have produced one file only!')
if receipt:
self.assertIsNotNone(result.receipt, f'{key} did not produce receipt!')
print("PASSED")
def test_dap(self):
surveys = survey_loader.get_dap()
self.execute(surveys, receipt=True, multiple_files=False)
def test_survey(self):
surveys = survey_loader.get_survey()
self.execute(surveys, receipt=True, multiple_files=True)
def test_hybrid(self):
surveys = survey_loader.get_hybrid()
self.execute(surveys, receipt=True, multiple_files=True)
def test_feedback(self):
survey = survey_loader.get_feedback()
self.execute(survey, receipt=False, multiple_files=False)
| 37.786885 | 109 | 0.572668 | 2,177 | 0.944469 | 0 | 0 | 140 | 0.060738 | 0 | 0 | 407 | 0.176573 |
de42aa506b54f4487685cb532dc908e5f790e4a5 | 509 | py | Python | shared/app_business_logic.py | c-w/python-loadtests | 3ffd3dc89780b9372a5d20a71b2becec121ff3d2 | [
"Apache-2.0"
]
| 2 | 2020-02-12T23:03:09.000Z | 2020-02-12T23:09:42.000Z | shared/app_business_logic.py | c-w/python-loadtests | 3ffd3dc89780b9372a5d20a71b2becec121ff3d2 | [
"Apache-2.0"
]
| null | null | null | shared/app_business_logic.py | c-w/python-loadtests | 3ffd3dc89780b9372a5d20a71b2becec121ff3d2 | [
"Apache-2.0"
]
| null | null | null | from os import environ
from azure.storage.table import TableService
azure_account_name = environ['AZURE_ACCOUNT_NAME']
azure_account_key = environ['AZURE_ACCOUNT_KEY']
azure_table_name = environ['AZURE_TABLE_NAME']
table = TableService(azure_account_name, azure_account_key)
get_entity = table.get_entity
def fetch_value(ident):
partition_key = ident[:3]
row_key = ident
entity = get_entity(azure_table_name, partition_key, row_key)
value = entity.get('value')
return {'value': value}
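# Illustrative call (assumes a row whose RowKey is 'abc123' exists):
#   fetch_value('abc123')  # looks up PartitionKey 'abc', RowKey 'abc123'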
| 28.277778 | 65 | 0.776031 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.139489 |
de44446f8526c9f2e48dd37b76b2ac71ae33e71b | 3,424 | py | Python | csrank/dataset_reader/objectranking/letor_object_ranking_dataset_reader.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
]
| null | null | null | csrank/dataset_reader/objectranking/letor_object_ranking_dataset_reader.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
]
| null | null | null | csrank/dataset_reader/objectranking/letor_object_ranking_dataset_reader.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
]
| 1 | 2018-10-30T08:57:14.000Z | 2018-10-30T08:57:14.000Z | import logging
import h5py
import numpy as np
from sklearn.utils import check_random_state
from csrank.constants import OBJECT_RANKING
from csrank.dataset_reader.letor_dataset_reader import LetorDatasetReader
from csrank.dataset_reader.objectranking.util import sub_sampling
NAME = "LetorObjectRankingDatasetReader"
class LetorObjectRankingDatasetReader(LetorDatasetReader):
def __init__(self, random_state=None, train_obj=5, **kwargs):
super(LetorObjectRankingDatasetReader, self).__init__(learning_problem=OBJECT_RANKING, **kwargs)
self.logger = logging.getLogger(NAME)
self.random_state = check_random_state(random_state)
self.train_obj = train_obj
self.__load_dataset__()
def __load_dataset__(self):
file = h5py.File(self.train_file, 'r')
self.X_train, self.Y_train = self.get_rankings_dict(file)
if self.train_obj is None:
self.train_obj = 5
self.X_train, self.Y_train = self.sub_sampling_for_dictionary()
file = h5py.File(self.test_file, 'r')
self.X_test, self.Y_test = self.get_rankings_dict(file)
self.logger.info("Done loading the dataset")
def get_rankings_dict(self, file):
lengths = file["lengths"]
X = dict()
Y = dict()
for ranking_length in np.array(lengths):
features = np.array(file["X_{}".format(ranking_length)])
rankings = np.array(file["Y_{}".format(ranking_length)])
            self.X, self.rankings = features, rankings
            X[ranking_length], Y[ranking_length] = features, rankings
self.__check_dataset_validity__()
return X, Y
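    # The dictionaries returned above are keyed by ranking length: each entry
    # holds the feature and ranking arrays for all instances of that length.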
def sub_sampling_for_dictionary(self):
X = []
Y = []
for n in self.X_train.keys():
if n > self.train_obj:
x, y = sub_sampling(NAME, self.X_train[n], self.Y_train[n], n_objects=self.train_obj)
if len(X) == 0:
X = np.copy(x)
Y = np.copy(y)
else:
X = np.concatenate([X, x], axis=0)
Y = np.concatenate([Y, y], axis=0)
if self.train_obj in self.X_train.keys():
X = np.concatenate([X, np.copy(self.X_train[self.train_obj])], axis=0)
Y = np.concatenate([Y, np.copy(self.Y_train[self.train_obj])], axis=0)
self.logger.info("Sampled instances {} objects {}".format(X.shape[0], X.shape[1]))
return X, Y
def splitter(self, iter):
pass
def get_train_test_datasets(self, n_datasets):
return self.X_train, self.Y_train, self.X_test, self.Y_test
def get_complete_dataset(self):
pass
def get_single_train_test_split(self):
return self.X_train, self.Y_train, self.X_test, self.Y_test
# if __name__ == '__main__':
# import sys
# import os
# import inspect
# dirname = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# logging.basicConfig(filename=os.path.join(dirname, 'log.log'), level=logging.DEBUG,
# format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S')
# logger = logging.getLogger(name='letor')
# sys.path.append("..")
# for n in [2008, 2007]:
# ds = LetorObjectRankingDatasetReader(year=n)
# logger.info(ds.X_train.shape)
# logger.info(np.array(ds.X_test.keys).shape)
| 39.356322 | 104 | 0.629965 | 2,445 | 0.714077 | 0 | 0 | 0 | 0 | 0 | 0 | 761 | 0.222255 |
de44c06366bdb1cf83f5f3bb8ad925cefb959cf0 | 1,222 | py | Python | app/wqFull/dev/trans.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
]
| null | null | null | app/wqFull/dev/trans.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
]
| null | null | null | app/wqFull/dev/trans.py | fkwai/geolearn | 30cb4353d22af5020a48100d07ab04f465a315b0 | [
"MIT"
]
| 2 | 2021-04-04T02:45:59.000Z | 2022-03-19T09:41:39.000Z | from sklearn.preprocessing import QuantileTransformer, PowerTransformer
from hydroDL.data import usgs, gageII, gridMET, ntn, GLASS, transform, dbBasin
import numpy as np
import matplotlib.pyplot as plt
from hydroDL.post import axplot, figplot
from hydroDL import kPath
import json
import os
import importlib
importlib.reload(axplot)
importlib.reload(figplot)
dm = dbBasin.DataFrameBasin('weathering')
# subset
dm.saveSubset('B10', ed='2009-12-31')
dm.saveSubset('A10', sd='2010-01-01')
yrIn = np.arange(1985, 2020, 5).tolist()
t1 = dbBasin.func.pickByYear(dm.t, yrIn, pick=False)
t2 = dbBasin.func.pickByYear(dm.t, yrIn)
dm.createSubset('rmYr5', dateLst=t1)
dm.createSubset('pkYr5', dateLst=t2)
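# Inferred from the pick flag and the subset names: t1 keeps dates outside the
# listed years ('rmYr5'), t2 keeps only dates within them ('pkYr5').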
codeSel = ['00915', '00925', '00930', '00935', '00940', '00945', '00955']
d1 = dbBasin.DataModelBasin(dm, varY=codeSel, subset='rmYr5')
d2 = dbBasin.DataModelBasin(dm, varY=codeSel, subset='pkYr5')
mtdY = ['QT' for var in codeSel]
d1.trans(mtdY=mtdY)
d1.saveStat('temp')
# d2.borrowStat(d1)
d2.loadStat('temp')
yy = d2.y
yP = d2.transOutY(yy)
yO = d2.Y
# TS
indS = 1
fig, axes = figplot.multiTS(d1.t, [yO[:, indS, :], yP[:, indS, :]])
fig.show()
indS = 1
fig, axes = figplot.multiTS(d1.t, [yy[:, indS, :]])
fig.show()
| 25.458333 | 78 | 0.714403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.139116 |
de463062073e4c38b0ef746845b5c9b821ed145e | 659 | py | Python | pysad/statistics/__init__.py | selimfirat/pysad | dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | [
"BSD-3-Clause"
]
| 155 | 2020-08-17T12:52:38.000Z | 2022-03-19T02:59:26.000Z | pysad/statistics/__init__.py | shubhsoni/pysad | dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | [
"BSD-3-Clause"
]
| 2 | 2020-10-22T09:50:28.000Z | 2021-02-15T02:01:44.000Z | pysad/statistics/__init__.py | shubhsoni/pysad | dff2ff38258eb8a85c9d34cf5f0b876fc1dc9ede | [
"BSD-3-Clause"
]
| 14 | 2020-10-09T17:08:23.000Z | 2022-03-25T11:30:12.000Z | """
The :mod:`pysad.statistics` module contains methods to keep track of statistics on streaming data.
"""
from .abs_statistic import AbsStatistic
from .average_meter import AverageMeter
from .count_meter import CountMeter
from .max_meter import MaxMeter
from .median_meter import MedianMeter
from .min_meter import MinMeter
from .running_statistic import RunningStatistic
from .sum_meter import SumMeter
from .sum_squares_meter import SumSquaresMeter
from .variance_meter import VarianceMeter
__all__ = ["AbsStatistic", "AverageMeter", "CountMeter", "MaxMeter", "MedianMeter", "MinMeter", "RunningStatistic", "SumMeter", "SumSquaresMeter", "VarianceMeter"]
| 41.1875 | 163 | 0.814871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.362671 |
de481c317eb312cc809e4b8eb2f8383abd96ba97 | 324 | py | Python | src/elrados/views.py | IamShobe/elrados | dd2523e1523591c7a3213dfd062b376f41bb9f18 | [
"MIT"
]
| 2 | 2018-07-20T11:03:42.000Z | 2019-06-06T06:00:12.000Z | src/elrados/views.py | IamShobe/elrados | dd2523e1523591c7a3213dfd062b376f41bb9f18 | [
"MIT"
]
| null | null | null | src/elrados/views.py | IamShobe/elrados | dd2523e1523591c7a3213dfd062b376f41bb9f18 | [
"MIT"
]
| 2 | 2018-12-18T16:00:34.000Z | 2019-04-08T14:29:02.000Z | """Global index view."""
import pkg_resources
from django.shortcuts import render
def index(request):
"""Basic view."""
plugins = \
[plugin.load() for plugin in
pkg_resources.iter_entry_points(group='elrados.plugins')]
return render(request, "index.html", {
"plugins": plugins
})
| 21.6 | 66 | 0.641975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.243827 |
de48207667680d4095ac834e7b25417f0ab4f83a | 2,274 | py | Python | examples/old/zipline_momentun.py | sherrytp/TradingEvolved | 4bc9cc18244954bff37a80f67cce658bd0802b5d | [
"Apache-2.0"
]
| null | null | null | examples/old/zipline_momentun.py | sherrytp/TradingEvolved | 4bc9cc18244954bff37a80f67cce658bd0802b5d | [
"Apache-2.0"
]
| null | null | null | examples/old/zipline_momentun.py | sherrytp/TradingEvolved | 4bc9cc18244954bff37a80f67cce658bd0802b5d | [
"Apache-2.0"
]
| 1 | 2022-03-26T07:11:18.000Z | 2022-03-26T07:11:18.000Z | import pandas as pd
import matplotlib.pyplot as plt
from zipline.finance.commission import PerShare
from zipline.api import set_commission, symbol, order_target_percent
import zipline
from models.live_momentum import LiveMomentum
with open('/Users/landey/Desktop/Eonum/live_model/eouniverse/stock_list.txt', 'r') as f:
data = f.read().split()
tickers = data[:20]
etf_list = tickers[15:]
def initialize(context):
    context.momentum_window = 5
    context.momentum_window2 = 10
context.min_long_momentum = 60
context.max_short_momentum = -10
context.long = 15
context.short = 15
context.etfs = 5
comm_model = PerShare(cost=0.0005)
set_commission(comm_model)
def handle_data(context, data):
equity_symbols = [symbol(i) for i in tickers]
etf_symbols = [symbol(i) for i in etf_list]
    hist_window = max(context.momentum_window, context.momentum_window2)
equity_hist = data.history(equity_symbols, 'close', hist_window, "1d").copy()
etf_hist = data.history(etf_symbols, 'close', hist_window, "1d").copy()
equity_hist_ = equity_hist.rename(columns={col: col.symbol for col in equity_hist.columns})
etf_hist_ = etf_hist.rename(columns={col: col.symbol for col in etf_hist.columns})
live = LiveMomentum(equity_hist_, etf_hist_, etf_mom=300, mom1=20, mom2=40,
min_long_mom=20, max_short_mom=-2, long=10,
short=5, etf=3)
# print(equity_hist_)
equity, etf = live.risk_model()
if equity:
for ticker, weight in equity.items():
if data.can_trade(symbol(ticker)) and weight != 0:
order_target_percent(symbol(ticker), weight)
if etf:
for ticker, weight in etf.items():
if data.can_trade(symbol(ticker)) and weight != 0:
order_target_percent(symbol(ticker), weight)
start = pd.Timestamp('2020-3-22', tz='utc')
end = pd.Timestamp('2020-4-28', tz='utc')
perf = zipline.run_algorithm(start=start,
end=end,
initialize=initialize,
capital_base=100000,
handle_data=handle_data,
bundle='sep')
perf.portfolio_value.plot()
plt.show()
| 30.72973 | 95 | 0.647757 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 149 | 0.065523 |
de4860345de948d81c21b1062677ea640e28f033 | 10,120 | py | Python | packages/robotControl/scripts/intercept.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
]
| 2 | 2021-01-15T13:27:19.000Z | 2021-08-04T08:40:52.000Z | packages/robotControl/scripts/intercept.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
]
| null | null | null | packages/robotControl/scripts/intercept.py | Falcons-Robocup/code | 2281a8569e7f11cbd3238b7cc7341c09e2e16249 | [
"Apache-2.0"
]
| 5 | 2018-05-01T10:39:31.000Z | 2022-03-25T03:02:35.000Z | # Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
#!/usr/bin/env python3
# Jan Feitsma, March 2020
# Robot will continuously intercept around current position.
#
# For description and usage hints, execute with '-h'
import sys, os
import time
import logging, signal
logging.basicConfig(level=logging.INFO)
import math, random
import argparse
import falconspy
import rtdb2tools
from robotLibrary import RobotLibrary
from worldState import WorldState
from FalconsCoordinates import *
def parse_arguments():
parser = argparse.ArgumentParser(description="""Automated single-robot intercept test. Robot will choose a position in a circle, continuously attempting to intercept the ball and pass to next robot. Includes a fallback getball in case ball bounces off. See also: wrapper script interceptCircle.py.""")
parser.add_argument('-a', '--actionradius', help='zone/action radius: in case intercept fails and ball is within this radius, just do a getball fallback', type=float, default=2.0)
parser.add_argument('-c', '--circleradius', help='home position circle radius on which robot default positions are set', type=float, default=4.0)
parser.add_argument('-t', '--target', help='pass target (default: next robot)', type=float, nargs=2, default=None)
parser.add_argument('-n', '--targetnoise', help='aim given amount of meters at a random side next to the target', type=float, default=0.0)
parser.add_argument('-w', '--dontwait', help='do not wait with intercepting until previous robot has the ball', action='store_true')
parser.add_argument('-q', '--quiet', help='suppress output', action='store_true')
# TODO use option 'active' intercept?
parser.add_argument('--home', help='home position (x,y), default calculated based on available robots and circleradius', type=float, nargs=2, default=None)
parser.add_argument('-i', '--index', help='home position index to choose (starting count at 1), default calculate based on available robots', type=int, nargs=2, default=None)
parser.add_argument('-r', '--robot', help='robot ID to use (intended only for simulation)', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('--ignore', help='robots to be ignored', type=int, nargs='+', default=[1])
return parser.parse_args()
def calcCirclePos(robotIdx, numRobots, radius=3, center=(0,0)):
"""
Helper function to distribute robot positions on a circle.
"""
gamma = 2*math.pi / numRobots
x = radius * math.cos(gamma * robotIdx) + center[0]
y = radius * math.sin(gamma * robotIdx) + center[1]
phi = gamma * robotIdx - math.pi
return (x, y, phi)
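# Example: calcCirclePos(0, 4, radius=3) -> (3.0, 0.0, -pi), i.e. the first of
# four robots sits at (3, 0) on the circle, facing the center.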
class Interceptor():
def __init__(self, settings):
self.settings = settings
self.rl = RobotLibrary(settings.robot, joystick=False)
self.ws = WorldState(settings.robot)
self.ws.startMonitoring()
self.otherRobotHasBall = False
# setup logging
self.state = None
self.logger = self.initializeLogger()
if settings.quiet:
self.logger.setLevel(logging.NOTSET)
# setup signal handler for proper shutdown
self.done = False
signal.signal(signal.SIGINT, self.signalHandler)
def signalHandler(self, signal, frame):
self.done = True
self.ws.stopMonitoring()
self.rl.shutdown()
# TODO: this is not yet working as intended...
def initializeLogger(self):
"""
Setup the logging environment
"""
log = logging.getLogger() # root logger
log.setLevel(logging.INFO)
format_str = '%(asctime)s.%(msecs)03d - %(levelname)-8s - r' + str(self.settings.robot) + ' - %(message)s'
date_format = '%Y-%m-%dT%H:%M:%S'
formatter = logging.Formatter(format_str, date_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.handlers = [] # clear
log.addHandler(stream_handler)
return logging.getLogger(__name__)
def activeRobots(self):
# ignore r1, if it is present, because it can never contribute
return [r for r in self.ws.activeRobots() if not r in self.settings.ignore]
def calculateRobotIndex(self):
# optional overrule
if self.settings.index != None:
idx0 = self.settings.index[0] - 1
n = self.settings.index[1]
else:
# default: get active robots and figure out index of this robot
a = self.activeRobots()
while not self.settings.robot in a: # init robustness
time.sleep(0.1)
a = self.activeRobots()
n = len(a)
idx0 = a.index(self.settings.robot)
return (idx0, n)
def calculateHomePosition(self):
# optional overrule
if self.settings.home != None:
(x, y) = self.settings.home
rz = math.pi * 0.5
else:
# default: position on a circle
(idx0, n) = self.calculateRobotIndex()
(x, y, rz) = calcCirclePos(idx0, n, self.settings.circleradius)
# face the ball if possible
b = self.ws.getBallPosition()
if b:
rz = math.atan2(b.y - y, b.x - x)
return (x, y, rz)
def canStartIntercept(self):
# optional overrule
if self.settings.dontwait:
return True
# robot should never stand idle if ball is closeby
if self.ballCloseBy():
return True
# check if previous robot has the ball
(idx0, n) = self.calculateRobotIndex()
a = self.activeRobots()
otherIdx = a[(idx0-1) % n]
# wait for the pass (state change in ball possession)
# robot should not intercept when other robot is still turning for instance
otherRobotHadBall = self.otherRobotHasBall
self.otherRobotHasBall = self.ws.hasBall(otherIdx)
        return otherRobotHadBall and not self.otherRobotHasBall
def determineTarget(self, noise=None):
# optional overrule
if self.settings.target:
(x, y) = self.settings.target
rz = 0
else:
# calculate nominal position of next robot
(idx0, n) = self.calculateRobotIndex()
a = self.activeRobots()
otherIdx = a[(idx0+1) % n]
(x, y, rz) = calcCirclePos(idx0+1, n, self.settings.circleradius)
otherPos = RobotPose(x, y, rz)
# add noise?
if noise:
# add noise to RCS x (perpendicular)
ownPos = self.ws.getRobotPosition()
ownPos.Rz = math.atan2(y - ownPos.y, x - ownPos.x) # face target
otherPosRcs = otherPos.transform_fcs2rcs(ownPos)
# offset RCS x in a random direction
r = random.randint(0, 1)
otherPosRcs.x += (r * 2 - 1) * noise
# back to FCS
otherPos = otherPosRcs.transform_rcs2fcs(ownPos)
return (otherPos.x, otherPos.y) # ignore Rz
def canPass(self):
# compare current position of next robot with nominal
nominalTarget = self.determineTarget()
(idx0, n) = self.calculateRobotIndex()
a = self.activeRobots()
if len(a) == 1:
return True
otherIdx = a[(idx0+1) % n]
otherPos = self.ws.getRobotPosition(otherIdx)
delta = otherPos - RobotPose(*nominalTarget)
return delta.xy().size() < 0.3
def ballCloseBy(self):
bd = self.ws.ballDistance()
return bd != None and bd < self.settings.actionradius
def setState(self, state):
# only write state change
if self.state != state:
# write to RDL eventlog
os.system('export TURTLE5K_ROBOTNUMBER=' + str(self.settings.robot) + ' ; frun diagnostics sendEvent INFO "' + state + '" > /dev/null')
# write to stdout?
logging.info(state)
self.state = state
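    # The run loop below cycles through the states: 'repositioning / waiting'
    # -> ('intercepting' | 'getball fallback') -> 'waiting to pass' -> 'pass'.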
def run(self):
# iterate
while not self.done:
# move to starting position, facing ball, with coarse tolerances
homePos = self.calculateHomePosition()
self.setState('repositioning / waiting')
self.rl.move(*homePos, xyTol=0.1, rzTol=0.05)
# wait until robot can start his intercept/getBall attempt
if self.canStartIntercept():
# get the ball, preferably via intercept
while not self.ws.hasBall() and not self.done:
if self.ballCloseBy():
self.setState('getball fallback')
self.rl.getBall() # blocking
else:
self.setState('intercepting')
self.rl.interceptBall() # blocking (with not-so-obvious RUNNING/FAILED criteria -> see mp code)
# note: good weather behavior: ball comes into the action radius while the robot
# is continuously intercepting on it, until pass/fail, so the getBall
# fallback should only start after intercept returns FAILED due to the ball moving away
# other robot might still be repositioning
while not self.canPass() and not self.done:
self.setState('waiting to pass')
time.sleep(0.1)
# pass to next robot and sleep a while, to prevent directly chasing the ball
self.setState('pass')
self.rl.passTo(*self.determineTarget(self.settings.targetnoise))
time.sleep(0.5)
else:
# sleep a bit
time.sleep(0.1)
# check if robot went offline
self.done = self.settings.robot not in self.activeRobots()
def main(args):
interceptor = Interceptor(args)
interceptor.run()
if __name__ == '__main__':
args = parse_arguments()
if args.robot == 0 or args.robot == None:
raise RuntimeError("Error: could not determine robot ID, this script should run on a robot")
main(args)
| 42.700422 | 305 | 0.619368 | 7,154 | 0.706917 | 0 | 0 | 0 | 0 | 0 | 0 | 3,306 | 0.32668 |
de4f135b4907a9ad1ee036150f5775fba0b81256 | 4,859 | py | Python | arpym/tools/plc.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
]
| 6 | 2021-04-10T13:24:30.000Z | 2022-03-26T08:20:42.000Z | arpym/tools/plc.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
]
| null | null | null | arpym/tools/plc.py | dpopadic/arpmRes | ddcc4de713b46e3e9dcb77cc08c502ce4df54f76 | [
"MIT"
]
| 6 | 2019-08-13T22:02:17.000Z | 2022-02-09T17:49:12.000Z | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from matplotlib.ticker import FuncFormatter
def tick_label_func(y, pos=None):
return '%1.f' % (5 * y * 1e-2 // 5)
def tick_label_func_1(y, pos=None):
return '%0.0f' % y
def plot_dynamic_strats(t, v_t_strat, v_t_risky, w_t_risky, h_t_risky,
num, j_sel):
"""For details, see here.
Parameters
----------
t : array, shape (t_,)
v_t_strat : array, shape (j_,t_)
v_t_risky : array, shape (j_,t_)
w_t_risky : array, shape (j_,t_)
h_t_risky: array, shape (j_,t_)
num: int
j_sel: int
"""
# adjust v_t_risky so that it has the same initial value as v_t_strat
v_t_risky = v_t_risky * v_t_strat[0, 0] / v_t_risky[0, 0]
mu_risky = np.mean(v_t_risky, axis=0, keepdims=True).reshape(-1)
sig_risky = np.std(v_t_risky, axis=0, keepdims=True).reshape(-1)
mu_strat = np.mean(v_t_strat, axis=0, keepdims=True).reshape(-1)
sig_strat = np.std(v_t_strat, axis=0, keepdims=True).reshape(-1)
plt.style.use('arpm')
fig = plt.figure()
gs = GridSpec(1, 2)
gs1 = GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[0])
num_bins = int(round(100 * np.log(v_t_strat.shape[1])))
lgrey = [0.8, 0.8, 0.8] # light grey
dgrey = [0.4, 0.4, 0.4] # dark grey
j_ = v_t_risky.shape[0]
x_min = t[0]
x_max = 1.25 * t[-1]
y_min = v_t_strat[0, 0] / 4
y_max = v_t_strat[0, 0] * 2.25
# scatter plot
ax4 = plt.subplot(gs[1])
plt.scatter(v_t_risky[:, -1], v_t_strat[:, -1], marker='.', s=2)
so = np.sort(v_t_risky[:, -1])
plt.plot(so, so, label='100% risky instrument', color='r')
plt.plot([y_min, v_t_risky[j_sel, -1], v_t_risky[j_sel, -1]],
[v_t_strat[j_sel, -1], v_t_strat[j_sel, -1], y_min], 'b--')
plt.plot(v_t_risky[j_sel, -1], v_t_strat[j_sel, -1], 'bo')
ax4.set_xlim(y_min, y_max)
ax4.set_ylim(y_min, y_max)
ax4.xaxis.set_major_formatter(FuncFormatter(tick_label_func))
ax4.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
    plt.xlabel('Risky instrument')  # x data: v_t_risky[:, -1]
    plt.ylabel('Strategy')  # y data: v_t_strat[:, -1]
plt.legend()
# weights and holdings
ax3 = plt.subplot(gs1[2])
y_min_3 = np.min(h_t_risky[j_sel, : -1])
y_max_3 = np.max(h_t_risky[j_sel, : -1])
plt.sca(ax3)
plt.plot(t, w_t_risky[j_sel, :], color='b')
plt.axis([x_min, x_max, 0, 1])
plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
plt.yticks(np.linspace(0, 1, 3), color='b')
plt.ylabel('Weights', color='b')
plt.xlabel('Time')
ax3_2 = ax3.twinx()
plt.plot(t, h_t_risky[j_sel, :], color='black')
plt.ylabel('Holdings', color='black')
plt.axis([x_min, x_max, y_min_3 - 1, y_max_3 + 1])
plt.yticks(np.linspace(y_min_3, y_max_3, 3))
ax3_2.yaxis.set_major_formatter(FuncFormatter(tick_label_func_1))
ax1 = plt.subplot(gs1[0], sharex=ax3, sharey=ax4)
# simulated path, standard deviation of strategy
for j in range(j_ - num, j_):
plt.plot(t, v_t_strat[j, :], color=lgrey)
plt.plot(t, v_t_strat[j_sel, :], color='b')
plt.plot(t, mu_strat + sig_strat, color='orange')
plt.plot(t, mu_strat - sig_strat, color='orange')
plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
# histogram
y_hist, x_hist = np.histogram(v_t_strat[:, -1], num_bins)
scale = 0.25 * t[-1] / np.max(y_hist)
y_hist = y_hist * scale
plt.barh(x_hist[: -1], y_hist, height=(max(x_hist) - min(x_hist)) /
(len(x_hist) - 1), left=t[-1], facecolor=dgrey, edgecolor=dgrey)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.ylabel('Strategy')
ax1.set_ylim(y_min, y_max)
ax1.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
# risky instrument
ax2 = plt.subplot(gs1[1], sharex=ax3, sharey=ax4)
# simulated path, standard deviation of risky instrument
for j in range(j_ - num, j_):
plt.plot(t, v_t_risky[j, :], color=lgrey)
plt.plot(t, v_t_risky[j_sel, :], color='b')
plt.plot(t, mu_risky + sig_risky, color='orange')
plt.plot(t, mu_risky - sig_risky, color='orange')
plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
# histogram
y_hist, x_hist = np.histogram(v_t_risky[:, -1], num_bins)
scale = 0.25 * t[-1] / np.max(y_hist)
y_hist = y_hist * scale
plt.barh(x_hist[: -1], y_hist, height=(max(x_hist) - min(x_hist)) /
(len(x_hist) - 1), left=t[-1], facecolor=dgrey, edgecolor=dgrey)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.ylabel('Risky instrument')
ax2.set_ylim(y_min, y_max)
ax2.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
plt.grid(True)
plt.tight_layout()
return fig, gs
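# Minimal usage sketch (added; not part of the arpym source). It builds
# synthetic lognormal value paths with the shapes required by the docstring
# and renders the dashboard. Note that plot_dynamic_strats calls
# plt.style.use('arpm'), which assumes the arpym style sheet is installed.
if __name__ == '__main__':
    np.random.seed(0)
    j_, t_ = 500, 252  # number of scenarios, number of time steps
    t = np.arange(t_)
    # lognormal random walks starting at 100 (first column fixed at 100)
    steps_risky = 0.02 * np.random.randn(j_, t_ - 1)
    steps_strat = 0.01 * np.random.randn(j_, t_ - 1)
    v_t_risky = 100 * np.exp(np.hstack([np.zeros((j_, 1)),
                                        np.cumsum(steps_risky, axis=1)]))
    v_t_strat = 100 * np.exp(np.hstack([np.zeros((j_, 1)),
                                        np.cumsum(steps_strat, axis=1)]))
    # illustrative weights in [0, 1] and the implied (rounded) holdings
    w_t_risky = np.clip(0.5 + np.cumsum(0.02 * np.random.randn(j_, t_),
                                        axis=1), 0, 1)
    h_t_risky = np.floor(w_t_risky * v_t_strat / v_t_risky)
    fig, gs = plot_dynamic_strats(t, v_t_strat, v_t_risky, w_t_risky,
                                  h_t_risky, num=20, j_sel=0)
    plt.show()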