Dataset schema (column: type, min-max; ⌀ = may be null):
hexsha: stringlengths 40-40 | size: int64 5-2.06M | ext: stringclasses 11 values | lang: stringclasses 1 value
max_stars_repo_path: stringlengths 3-251 | max_stars_repo_name: stringlengths 4-130 | max_stars_repo_head_hexsha: stringlengths 40-78 | max_stars_repo_licenses: listlengths 1-10 | max_stars_count: int64 1-191k ⌀ | max_stars_repo_stars_event_min_datetime: stringlengths 24-24 ⌀ | max_stars_repo_stars_event_max_datetime: stringlengths 24-24 ⌀
max_issues_repo_path: stringlengths 3-251 | max_issues_repo_name: stringlengths 4-130 | max_issues_repo_head_hexsha: stringlengths 40-78 | max_issues_repo_licenses: listlengths 1-10 | max_issues_count: int64 1-116k ⌀ | max_issues_repo_issues_event_min_datetime: stringlengths 24-24 ⌀ | max_issues_repo_issues_event_max_datetime: stringlengths 24-24 ⌀
max_forks_repo_path: stringlengths 3-251 | max_forks_repo_name: stringlengths 4-130 | max_forks_repo_head_hexsha: stringlengths 40-78 | max_forks_repo_licenses: listlengths 1-10 | max_forks_count: int64 1-105k ⌀ | max_forks_repo_forks_event_min_datetime: stringlengths 24-24 ⌀ | max_forks_repo_forks_event_max_datetime: stringlengths 24-24 ⌀
content: stringlengths 1-1.05M | avg_line_length: float64 1-1.02M | max_line_length: int64 3-1.04M | alphanum_fraction: float64 0-1
hexsha: dbde2669ec80772673d5f19711266d806c399444 | size: 7,303 | ext: py | lang: Python
max_stars: biggan_discovery/orojar_discover.py | andreasjansson/OroJaR | head ebb8c0333bbd33c063b6dd4a21a0559eb86d13e9 | licenses ["BSD-2-Clause"] | count 47 | 2021-07-26T07:54:06.000Z to 2022-02-07T16:37:40.000Z
max_issues: biggan_discovery/orojar_discover.py | andreasjansson/OroJaR | head ebb8c0333bbd33c063b6dd4a21a0559eb86d13e9 | licenses ["BSD-2-Clause"] | count 1 | 2021-09-14T07:26:15.000Z to 2021-09-14T07:45:59.000Z
max_forks: biggan_discovery/orojar_discover.py | andreasjansson/OroJaR | head ebb8c0333bbd33c063b6dd4a21a0559eb86d13e9 | licenses ["BSD-2-Clause"] | count 7 | 2021-08-21T07:33:35.000Z to 2022-03-16T23:21:29.000Z
"""
Learns a matrix of Z-Space directions using a pre-trained BigGAN Generator.
Modified from train.py in the PyTorch BigGAN repo.
"""
import os
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.optim
import utils
import train_fns
from sync_batchnorm import patch_replication_callback
from torch.utils.tensorboard import SummaryWriter
from orojar import orojar
from direction_utils import visualize_directions, load_G, get_direction_padding_fn, init_wandb, download_G
from layers import fast_gram_schmidt, norm
# The main training file. Config is a dictionary specifying the configuration
# of this training run.
def run(config):
if config['wandb_entity'] is not None:
init_wandb(config, config['experiment_name'], config['wandb_entity'], 'imagenet')
if config["G_path"] is None: # Download a pre-trained G if necessary
download_G()
config["G_path"] = 'checkpoints/138k'
G, state_dict, device, experiment_name = load_G(config)
# If parallel, parallelize the GD module
if config['parallel']:
G = nn.DataParallel(DataParallelLoss(G))
if config['cross_replica']:
patch_replication_callback(G)
num_gpus = torch.cuda.device_count()
print(f'Using {num_gpus} GPUs')
# If search_space != 'all', then we need to pad the z components that we are leaving alone:
pad = get_direction_padding_fn(config)
direction_size = config['dim_z'] if config['search_space'] == 'all' else config['ndirs']
# A is our (ndirs, |z|) matrix of directions, where ndirs indicates the number of directions we want to learn
if config['load_A'] == 'coords':
print('Initializing with standard basis directions')
A = torch.nn.Parameter(torch.eye(config['ndirs'], direction_size, device=device), requires_grad=True)
elif config['load_A'] == 'random':
print('Initializing with random directions')
A = torch.nn.Parameter(torch.empty(config['ndirs'], direction_size, device=device), requires_grad=True)
torch.nn.init.kaiming_normal_(A)
else:
raise NotImplementedError
# We only learn A; G is left frozen during training:
optim = torch.optim.Adam(params=[A], lr=config['A_lr'])
# Allow for different batch sizes in G
G_batch_size = max(config['G_batch_size'], config['batch_size'])
z_, y_ = utils.prepare_z_y(G_batch_size, G.module.G.dim_z, config['n_classes'],
device=device, fp16=config['G_fp16'])
# Prepare a fixed z & y to see individual sample evolution throghout training
fixed_z, fixed_y = utils.prepare_z_y(G_batch_size, G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
fixed_z.sample_()
fixed_y.sample_()
interp_z, interp_y = utils.prepare_z_y(config["n_samples"], G.module.G.dim_z,
config['n_classes'], device=device,
fp16=config['G_fp16'])
interp_z.sample_()
interp_y.sample_()
if config['fix_class'] is not None:
y_ = y_.new_full(y_.size(), config['fix_class'])
fixed_y = fixed_y.new_full(fixed_y.size(), config['fix_class'])
interp_y = interp_y.new_full(interp_y.size(), config['fix_class'])
print('Beginning training at epoch %d...' % state_dict['epoch'])
# Train for specified number of epochs, although we mostly track G iterations.
iters_per_epoch = 1000
dummy_loader = [None] * iters_per_epoch # We don't need any real data
path_size = config['path_size']
# Simply stores a |z|-dimensional one-hot vector indicating each direction we are learning:
direction_indicators = torch.eye(config['ndirs']).to(device)
G.eval()
G.module.optim = optim
writer = SummaryWriter('%s/%s' % (config['logs_root'], experiment_name))
sample_sheet = train_fns.save_and_sample(G.module.G, None, G.module.G, z_, y_, fixed_z, fixed_y,
state_dict, config, experiment_name)
writer.add_image('samples', sample_sheet, 0)
interp_y_ = G.module.G.shared(interp_y)
norm_fn = norm
# Make directions orthonormal via Gram-Schmidt followed by a normalization:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
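# For clarity, a minimal sketch of what a row-wise Gram-Schmidt followed by
# normalization can look like in PyTorch. The actual `fast_gram_schmidt` and
# `norm` used above are imported from layers.py and may differ in detail; this
# is only an illustration of the orthonormalization step, not the repo's code.
def _gram_schmidt_rows_sketch(M):
    rows = []
    for v in M:
        for u in rows:
            v = v - (v @ u) * u  # remove the component along each earlier direction
        rows.append(v / (v.norm() + 1e-8))  # rescale to unit length
    return torch.stack(rows)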
if config["vis_during_training"]:
print("Generating initial visualizations...")
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], 0, fps=24)
for epoch in range(state_dict['epoch'], config['num_epochs']):
if config['pbar'] == 'mine':
pbar = utils.progress(dummy_loader, displaytype='s1k' if config['use_multiepoch_sampler'] else 'eta')
else:
pbar = tqdm(dummy_loader)
for i, _ in enumerate(pbar):
state_dict['itr'] += 1
z_.sample_()
if config['fix_class'] is None:
y_.sample_()
y = G.module.G.shared(y_)
# OroJaR taken w.r.t. w_sampled, NOT z:
w = torch.zeros((G_batch_size, config['ndirs'])) # equal to the one-hot w
penalty = G(z_, y, w=w, Q=Q.repeat(num_gpus, 1)).mean()
optim.zero_grad()
penalty.backward()
optim.step()
# re-orthogonalize A for visualizations and the next training iteration:
Q = pad(norm_fn(fast_gram_schmidt(A))) if not config["no_ortho"] else pad(A)
# Log metrics to TensorBoard/WandB:
cur_training_iter = epoch * iters_per_epoch + i
writer.add_scalar(f'Metrics/orojar', penalty.item(), cur_training_iter)
writer.add_scalar('Metrics/direction_norm', A.pow(2).mean().pow(0.5).item(), cur_training_iter)
# Save directions and log visuals:
if not (state_dict['itr'] % config['save_every']):
torch.save(A.cpu().detach(), '%s/%s/A_%06d.pt' %
(config['weights_root'], experiment_name, cur_training_iter))
if config["vis_during_training"]:
interp_vis = visualize_directions(G.module.G, interp_z, interp_y_, path_sizes=path_size, Q=Q,
high_quality=False, npv=1)
for w_ix in range(config['ndirs']):
writer.add_video('G_ema/w%03d' % w_ix, interp_vis[w_ix], cur_training_iter, fps=24)
state_dict['epoch'] += 1
if __name__ == '__main__':
main()
avg_line_length: 41.259887 | max_line_length: 113 | alphanum_fraction: 0.633301
hexsha: dbde5b0dbcab23e1ef72b1961f7810d2ab8cc002 | size: 6,452 | ext: py | lang: Python
max_stars: file_importer0.py | Alva789ro/Regional-Comprehensive-Economic-Partnership-RCEP-Economic-Default-Risk-Analysis | head 454583f47883edae17391f101b10b38b68c9834f | licenses ["MIT"] | count 1 | 2021-03-15T19:44:36.000Z to 2021-03-15T19:44:36.000Z
max_issues: file_importer0.py | Alva789ro/Regional-Comprehensive-Economic-Partnership-RCEP-Economic-Default-Risk-Analysis | head 454583f47883edae17391f101b10b38b68c9834f | licenses ["MIT"] | count null | dates null
max_forks: file_importer0.py | Alva789ro/Regional-Comprehensive-Economic-Partnership-RCEP-Economic-Default-Risk-Analysis | head 454583f47883edae17391f101b10b38b68c9834f | licenses ["MIT"] | count 1 | 2022-02-06T01:33:41.000Z to 2022-02-06T01:33:41.000Z
import xlsxwriter
import pandas as pd
import numpy as np
import mysql.connector
australia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Australia')
brunei=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Brunei')
cambodia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Cambodia')
china=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='China')
indonesia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Indonesia')
japan=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Japan')
lao=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Lao')
malaysia=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Malaysia')
myanmar=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Myanmar')
new_zeland=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='New Zeland')
philipines=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Philipines')
singapore=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Singapore')
thailand=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Thailand')
vietnam=pd.read_excel(r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx', sheet_name='Vietnam')
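# The fourteen per-sheet reads above can equivalently be collapsed into one loop;
# a minimal sketch using the same workbook path and the same sheet names
# ('New Zeland' / 'Philipines' spellings kept to match the sheets used above):
def read_all_sheets(path=r'\Users\jesica\Desktop\RCEP_economic_analysis.xlsx'):
    sheet_names = ['Australia', 'Brunei', 'Cambodia', 'China', 'Indonesia', 'Japan',
                   'Lao', 'Malaysia', 'Myanmar', 'New Zeland', 'Philipines',
                   'Singapore', 'Thailand', 'Vietnam']
    return {name: pd.read_excel(path, sheet_name=name) for name in sheet_names}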
'''
mydb = mysql.connector.connect(
host = "localhost",
user = "root",
passwd = "",
database = ""
)
mycursor = mydb.cursor()
sqlformula1 = "INSERT INTO australia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(australia['Year'], australia['RGDP'], australia['NGDP'], australia['GDP_pc'], australia['Inflation'], australia['Unemployment_Rate'], australia['Net_LB'], australia['Account_Balance']):
mycursor.execute(sqlformula1, [a, b, c, d, e, f, g, h])
sqlformula2 = "INSERT INTO brunei VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(brunei['Year'], brunei['RGDP'], brunei['NGDP'], brunei['GDP_pc'], brunei['Inflation'], brunei['Unemployment_Rate'], brunei['Net_LB'], brunei['Account_Balance']):
mycursor.execute(sqlformula2, [a, b, c, d, e, f, g, h])
sqlformula3 = "INSERT INTO cambodia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(cambodia['Year'], cambodia['RGDP'], cambodia['NGDP'], cambodia['GDP_pc'], cambodia['Inflation'], cambodia['Unemployment_Rate'], cambodia['Net_LB'], cambodia['Account_Balance']):
mycursor.execute(sqlformula3, [a, b, c, d, e, f, g, h])
sqlformula4 = "INSERT INTO china VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(china['Year'], china['RGDP'], china['NGDP'], china['GDP_pc'], china['Inflation'], china['Unemployment_Rate'], china['Net_LB'], china['Account_Balance']):
mycursor.execute(sqlformula4, [a, b, c, d, e, f, g, h])
sqlformula5 = "INSERT INTO indonesia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(indonesia['Year'], indonesia['RGDP'], indonesia['NGDP'], indonesia['GDP_pc'], indonesia['Inflation'], indonesia['Unemployment_Rate'], indonesia['Net_LB'], indonesia['Account_Balance']):
mycursor.execute(sqlformula5, [a, b, c, d, e, f, g, h])
sqlformula6 = "INSERT INTO japan VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(japan['Year'], japan['RGDP'], japan['NGDP'], japan['GDP_pc'], japan['Inflation'], japan['Unemployment_Rate'], japan['Net_LB'], japan['Account_Balance']):
mycursor.execute(sqlformula6, [a, b, c, d, e, f, g, h])
sqlformula7 = "INSERT INTO lao VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(lao['Year'], lao['RGDP'], lao['NGDP'], lao['GDP_pc'], lao['Inflation'], lao['Unemployment_Rate'], lao['Net_LB'], lao['Account_Balance']):
mycursor.execute(sqlformula7, [a, b, c, d, e, f, g, h])
sqlformula8 = "INSERT INTO malaysia VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(malaysia['Year'], malaysia['RGDP'], malaysia['NGDP'], malaysia['GDP_pc'], malaysia['Inflation'], malaysia['Unemployment_Rate'], malaysia['Net_LB'], malaysia['Account_Balance']):
mycursor.execute(sqlformula8, [a, b, c, d, e, f, g, h])
sqlformula9 = "INSERT INTO myanmar VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(myanmar['Year'], myanmar['RGDP'], myanmar['NGDP'], myanmar['GDP_pc'], myanmar['Inflation'], myanmar['Unemployment_Rate'], myanmar['Net_LB'], myanmar['Account_Balance']):
mycursor.execute(sqlformula9, [a, b, c, d, e, f, g, h])
sqlformula10 = "INSERT INTO new_zeland VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(new_zeland['Year'], new_zeland['RGDP'], new_zeland['NGDP'], new_zeland['GDP_pc'], new_zeland['Inflation'], new_zeland['Unemployment_Rate'], new_zeland['Net_LB'], new_zeland['Account_Balance']):
mycursor.execute(sqlformula10, [a, b, c, d, e, f, g, h])
sqlformula11 = "INSERT INTO philipines VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(philipines['Year'], philipines['RGDP'], philipines['NGDP'], philipines['GDP_pc'], philipines['Inflation'], philipines['Unemployment_Rate'], philipines['Net_LB'], philipines['Account_Balance']):
mycursor.execute(sqlformula11, [a, b, c, d, e, f, g, h])
sqlformula12 = "INSERT INTO singapore VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(singapore['Year'], singapore['RGDP'], singapore['NGDP'], singapore['GDP_pc'], singapore['Inflation'], singapore['Unemployment_Rate'], singapore['Net_LB'], singapore['Account_Balance']):
mycursor.execute(sqlformula12, [a, b, c, d, e, f, g, h])
sqlformula13 = "INSERT INTO thailand VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(thailand['Year'], thailand['RGDP'], thailand['NGDP'], thailand['GDP_pc'], thailand['Inflation'], thailand['Unemployment_Rate'], thailand['Net_LB'], thailand['Account_Balance']):
mycursor.execute(sqlformula13, [a, b, c, d, e, f, g, h])
sqlformula14 = "INSERT INTO vietnam VALUES(%s, %s, %s, %s, %s, %s, %s, %s)"
for a, b, c, d, e, f, g, h in zip(vietnam['Year'], vietnam['RGDP'], vietnam['NGDP'], vietnam['GDP_pc'], vietnam['Inflation'], vietnam['Unemployment_Rate'], vietnam['Net_LB'], vietnam['Account_Balance']):
mycursor.execute(sqlformula14, [a, b, c, d, e, f, g, h])
'''
#mydb.commit()
avg_line_length: 72.494382 | max_line_length: 227 | alphanum_fraction: 0.67359
hexsha: dbde9d29ec27efc6184d1b64557b595e4c3e0755 | size: 6,837 | ext: py | lang: Python
max_stars: packer/resources/bootstrap_node.py | VIOOH/nile | head 893802387b3891ea02aae05f39ff4aa051354f18 | licenses ["Apache-2.0"] | count 4 | 2021-07-09T15:55:04.000Z to 2021-12-28T10:34:12.000Z
max_issues: packer/resources/bootstrap_node.py | Kishore88/nile | head 893802387b3891ea02aae05f39ff4aa051354f18 | licenses ["Apache-2.0"] | count null | dates null
max_forks: packer/resources/bootstrap_node.py | Kishore88/nile | head 893802387b3891ea02aae05f39ff4aa051354f18 | licenses ["Apache-2.0"] | count 3 | 2021-07-09T15:55:09.000Z to 2021-07-10T10:24:02.000Z
#!/usr/bin/env python3
import os
import re
import glob
import boto3
import requests
import subprocess
from time import sleep
AWS_REGION = os.environ['AWS_REGION']
DEPLOY_UUID = os.environ['DEPLOY_UUID']
SERVICE_NAME = os.environ['SERVICE_NAME']
MOUNT_POINT = "/var/lib/" + SERVICE_NAME
NIC_IP = os.environ['NIC_IP']
TAG_KEY = os.environ['TAG_KEY']
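# The helpers called in the main block below are presumably defined in the part of
# this file not shown here. As a rough, hypothetical sketch only (not the repo's
# actual code), attach_eni_ids() plausibly finds ENIs tagged with DEPLOY_UUID under
# TAG_KEY and attaches them to this instance:
def attach_eni_ids_sketch():
    ec2 = boto3.client('ec2', region_name=AWS_REGION)
    # The instance id is available from the EC2 instance metadata service.
    instance_id = requests.get(
        'http://169.254.169.254/latest/meta-data/instance-id', timeout=2).text
    enis = ec2.describe_network_interfaces(
        Filters=[{'Name': 'tag:' + TAG_KEY, 'Values': [DEPLOY_UUID]},
                 {'Name': 'status', 'Values': ['available']}])
    for idx, eni in enumerate(enis['NetworkInterfaces'], start=1):
        ec2.attach_network_interface(NetworkInterfaceId=eni['NetworkInterfaceId'],
                                     InstanceId=instance_id, DeviceIndex=idx)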
if __name__ == '__main__':
boto3.setup_default_session(region_name=AWS_REGION)
# uses: DEPLOY_UUID, TAG_KEY
attach_eni_ids()
# uses: MOUNT_POINT, SERVICE_NAME, DEPLOY_UUID, TAG_KEY
attach_ebs()
# uses: NIC_IP
change_default_route()
avg_line_length: 27.130952 | max_line_length: 111 | alphanum_fraction: 0.584321
hexsha: dbdf10789c79bc37376b7fcca6ae9a0b284ccf83 | size: 4,412 | ext: py | lang: Python
max_stars: parsers/srum_parser.py | otoriocyber/Chronos | head d70e22afed723c0ad4b7e449bd253e15351bada6 | licenses ["MIT"] | count 12 | 2021-04-20T23:08:28.000Z to 2022-02-18T01:23:42.000Z
max_issues: parsers/srum_parser.py | otoriocyber/chronos | head d70e22afed723c0ad4b7e449bd253e15351bada6 | licenses ["MIT"] | count null | dates null
max_forks: parsers/srum_parser.py | otoriocyber/chronos | head d70e22afed723c0ad4b7e449bd253e15351bada6 | licenses ["MIT"] | count null | dates null
import csv
import datetime
import random
import os
from parsers.parser_base import ParserBase
FILE_TIME_EPOCH = datetime.datetime(1601, 1, 1)
FILE_TIME_MICROSECOND = 10
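# FILE_TIME_EPOCH and FILE_TIME_MICROSECOND are the usual ingredients for decoding
# Windows FILETIME values (100-nanosecond ticks since 1601-01-01). A minimal sketch
# of the conversion the parser presumably performs (helper name is illustrative;
# the real code is in the omitted part of this file):
def filetime_to_datetime(filetime_ticks):
    return FILE_TIME_EPOCH + datetime.timedelta(
        microseconds=filetime_ticks / FILE_TIME_MICROSECOND)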
avg_line_length: 45.020408 | max_line_length: 122 | alphanum_fraction: 0.522439
hexsha: dbdfe8bd7432ddafe572e5360642d04fff53f125 | size: 593 | ext: py | lang: Python
max_stars: tests/csrf_tests/test_context_processor.py | Yoann-Vie/esgi-hearthstone | head 115d03426c7e8e80d89883b78ac72114c29bed12 | licenses ["PSF-2.0", "BSD-3-Clause"] | count null | dates null
max_issues: tests/csrf_tests/test_context_processor.py | Yoann-Vie/esgi-hearthstone | head 115d03426c7e8e80d89883b78ac72114c29bed12 | licenses ["PSF-2.0", "BSD-3-Clause"] | count null | dates null
max_forks: tests/csrf_tests/test_context_processor.py | Yoann-Vie/esgi-hearthstone | head 115d03426c7e8e80d89883b78ac72114c29bed12 | licenses ["PSF-2.0", "BSD-3-Clause"] | count null | dates null
from django.http import HttpRequest
from django.middleware.csrf import _compare_salted_tokens as equivalent_tokens
from django.template.context_processors import csrf
from django.test import SimpleTestCase
avg_line_length: 39.533333 | max_line_length: 88 | alphanum_fraction: 0.768971
hexsha: dbe03e42e9b3d1f6b76452c08db25467c63f6175 | size: 30,141 | ext: py | lang: Python
max_stars: python/das/types.py | marza-animation-planet/das | head 1c7460dfdd5f138d8317c72900e90b23c0c28c7b | licenses ["MIT"] | count 4 | 2018-11-19T01:36:01.000Z to 2022-02-28T03:41:12.000Z
max_issues: python/das/types.py | marza-animation-planet/das | head 1c7460dfdd5f138d8317c72900e90b23c0c28c7b | licenses ["MIT"] | count 1 | 2021-12-26T11:57:07.000Z to 2022-03-16T07:18:01.000Z
max_forks: python/das/types.py | marza-animation-planet/das | head 1c7460dfdd5f138d8317c72900e90b23c0c28c7b | licenses ["MIT"] | count 2 | 2019-03-30T10:28:12.000Z to 2022-03-04T17:58:39.000Z
import sys
import das
import traceback
def _wrap(self, rhs):
st = self._get_schema_type()
rv = self.__class__(rhs if st is None else st._validate_self(rhs))
rv._set_schema_type(self._get_schema_type())
return rv
def _adapt_value(self, value, key=None, index=None):
return das.adapt_value(value, schema_type=self._get_schema_type(), key=key, index=index)
def _validate(self, schema_type=None):
if schema_type is None:
schema_type = self._get_schema_type()
if schema_type is not None:
schema_type.validate(self)
self._set_schema_type(schema_type)
def _gvalidate(self):
st = self._get_schema_type()
if st is not None:
# run self validation first (container validation)
st._validate_self(self)
if hasattr(self, "_is_global_validation_enabled"):
if not self._is_global_validation_enabled():
# Skip global validation
return
gvcb = self._get_validate_globally_cb()
if gvcb is not None:
gvcb()
if hasattr(self, "_validate_globally"):
try:
getattr(self, "_validate_globally")()
except:
_, ei, tb = sys.exc_info()
ei = das.ValidationError("Global Validation Failed (%s)" % str(ei))
raise ei.__class__, ei, tb
class Tuple(TypeBase, tuple):
class Sequence(TypeBase, list):
# def __contains__(self, y):
# try:
# _v = self._adapt_value(y, index=0)
# return super(Sequence, self).__contains__(_v)
# except:
# return False
class Set(TypeBase, set):
class Dict(TypeBase, dict):
# def __contains__(self, k):
# try:
# _k = self._adapt_key(k)
# return super(Dict, self).__contains__(_k)
# except:
# return False
class Struct(TypeBase):
# Override of dict.has_key
# Override of dict.pop
# Override of dict.popitem
# Override of dict.clear
# Override of dict.copy
# Override of dict.setdefault
# Override of dict.update
avg_line_length: 30.945585 | max_line_length: 146 | alphanum_fraction: 0.563883
hexsha: dbe088a01c052a1745bf75ba9a62254a5f03f63b | size: 4,829 | ext: py | lang: Python
max_stars: track.py | AliabbasMerchant/fileTrackAndBackup | head 8cdf97be58c69061e1f60c08f89b524d91f8c17d | licenses ["MIT"] | count 6 | 2018-08-11T12:00:11.000Z to 2021-06-15T09:11:34.000Z
max_issues: track.py | AliabbasMerchant/fileTrackAndBackup | head 8cdf97be58c69061e1f60c08f89b524d91f8c17d | licenses ["MIT"] | count null | dates null
max_forks: track.py | AliabbasMerchant/fileTrackAndBackup | head 8cdf97be58c69061e1f60c08f89b524d91f8c17d | licenses ["MIT"] | count null | dates null
#! /usr/bin/python3
from help import *
import time
# short-forms are used, so as to reduce the .json file size
# t : type - d or f
# d : directory
# f : file
# ts : timestamp
# dirs : The dictionary containing info about directory contents
# time : edit time of the file/folder
# s : size of the file/folder
# p : full path of the file/folder
# n : name of the main file/folder in the .json file
# i : info about the contents in the .json file
# folder = {'t': 'd', 's': get_size(dir_dict), 'p': full_path + '/' + entity, 'time': get_time(stats), 'dirs': dir_dict}
# file = {'t': 'f', 's': stats.st_size, 'p': full_path + '/' + entity, 'time': get_time(stats)}
# info = {'t': 'd', 's': size, 'p': base_path, 'time': get_time(stats), 'dirs': info}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
# info = {'t': 'f', 's': stats.st_size, 'p': base_path, 'time': get_time(stats)}
# write = {'n': examine_name, 'ts': time.time(), 'i': info}
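# A tiny worked example of the structure described above (all values illustrative):
# write = {'n': 'my_folder', 'ts': 1628000000.0,
#          'i': {'t': 'd', 's': 4216, 'p': '/home/user/my_folder', 'time': 1627990000,
#                'dirs': {'notes.txt': {'t': 'f', 's': 120,
#                                       'p': '/home/user/my_folder/notes.txt',
#                                       'time': 1627980000}}}}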
no_of_files = 0
no_of_dirs = 0
examine_name = ''
save_filename = ''
_base_path = None
_ignore = False
errors = []
if __name__ == '__main__':
track(os.getcwd(), os.getcwd(), output=True)
avg_line_length: 40.923729 | max_line_length: 120 | alphanum_fraction: 0.57631
hexsha: dbe08be9b24ad6685aafe893f7f5de89e33519df | size: 31,693 | ext: py | lang: Python
max_stars: clang/tools/scan-build-py/libscanbuild/analyze.py | Kvarnefalk/llvm-project | head 8b5f5798aaa24074609d151ea906d114cf5337c2 | licenses ["Apache-2.0"] | count 1 | 2021-02-17T04:40:38.000Z to 2021-02-17T04:40:38.000Z
max_issues: clang/tools/scan-build-py/libscanbuild/analyze.py | Kvarnefalk/llvm-project | head 8b5f5798aaa24074609d151ea906d114cf5337c2 | licenses ["Apache-2.0"] | count null | dates null
max_forks: clang/tools/scan-build-py/libscanbuild/analyze.py | Kvarnefalk/llvm-project | head 8b5f5798aaa24074609d151ea906d114cf5337c2 | licenses ["Apache-2.0"] | count null | dates null
# -*- coding: utf-8 -*-
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
""" This module implements the 'scan-build' command API.
Running the static analyzer against a build is done in multiple steps:
-- Intercept: capture the compilation command during the build,
-- Analyze: run the analyzer against the captured commands,
-- Report: create a cover report from the analyzer outputs. """
import re
import os
import os.path
import json
import logging
import multiprocessing
import tempfile
import functools
import subprocess
import contextlib
import datetime
import shutil
import glob
from collections import defaultdict
from libscanbuild import command_entry_point, compiler_wrapper, \
wrapper_environment, run_build, run_command, CtuConfig
from libscanbuild.arguments import parse_args_for_scan_build, \
parse_args_for_analyze_build
from libscanbuild.intercept import capture
from libscanbuild.report import document
from libscanbuild.compilation import split_command, classify_source, \
compiler_language
from libscanbuild.clang import get_version, get_arguments, get_triple_arch, \
ClangErrorException
from libscanbuild.shell import decode
__all__ = ['scan_build', 'analyze_build', 'analyze_compiler_wrapper']
COMPILER_WRAPPER_CC = 'analyze-cc'
COMPILER_WRAPPER_CXX = 'analyze-c++'
CTU_EXTDEF_MAP_FILENAME = 'externalDefMap.txt'
CTU_TEMP_DEFMAP_FOLDER = 'tmpExternalDefMaps'
def need_analyzer(args):
""" Check the intent of the build command.
When the static analyzer is run against a project's configure step, it should be
silent; there is no need to run the analyzer or generate a report.
Running `scan-build` against the configure step might still be necessary
when compiler wrappers are used. That is the moment when the build setup
checks the compiler and captures its location for the build process. """
return len(args) and not re.search(r'configure|autogen', args[0])
def prefix_with(constant, pieces):
""" From a sequence create another sequence where every second element
is from the original sequence and the odd elements are the prefix.
eg.: prefix_with(0, [1,2,3]) creates [0, 1, 0, 2, 0, 3] """
return [elem for piece in pieces for elem in [constant, piece]]
def get_ctu_config_from_args(args):
""" CTU configuration is created from the chosen phases and dir. """
return (
CtuConfig(collect=args.ctu_phases.collect,
analyze=args.ctu_phases.analyze,
dir=args.ctu_dir,
extdef_map_cmd=args.extdef_map_cmd)
if hasattr(args, 'ctu_phases') and hasattr(args.ctu_phases, 'dir')
else CtuConfig(collect=False, analyze=False, dir='', extdef_map_cmd=''))
def get_ctu_config_from_json(ctu_conf_json):
""" CTU configuration is created from the chosen phases and dir. """
ctu_config = json.loads(ctu_conf_json)
# Recover namedtuple from json when coming from analyze-cc or analyze-c++
return CtuConfig(collect=ctu_config[0],
analyze=ctu_config[1],
dir=ctu_config[2],
extdef_map_cmd=ctu_config[3])
def create_global_ctu_extdef_map(extdef_map_lines):
""" Takes iterator of individual external definition maps and creates a
global map keeping only unique names. We leave conflicting names out of
CTU.
:param extdef_map_lines: Contains the id of a definition (mangled name) and
the originating source (the corresponding AST file) name.
:type extdef_map_lines: Iterator of str.
:returns: Mangled name - AST file pairs.
:rtype: List of (str, str) tuples.
"""
mangled_to_asts = defaultdict(set)
for line in extdef_map_lines:
mangled_name, ast_file = line.strip().split(' ', 1)
mangled_to_asts[mangled_name].add(ast_file)
mangled_ast_pairs = []
for mangled_name, ast_files in mangled_to_asts.items():
if len(ast_files) == 1:
mangled_ast_pairs.append((mangled_name, next(iter(ast_files))))
return mangled_ast_pairs
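# Illustrative example of the de-duplication above: from the three lines
#   '_Z3foov /path/a.ast', '_Z3barv /path/b.ast', '_Z3foov /path/c.ast'
# only ('_Z3barv', '/path/b.ast') survives, because '_Z3foov' maps to two
# different AST files and is therefore left out of CTU.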
def merge_ctu_extdef_maps(ctudir):
""" Merge individual external definition maps into a global one.
As the collect phase runs parallel on multiple threads, all compilation
units are separately mapped into a temporary file in CTU_TEMP_DEFMAP_FOLDER.
These definition maps contain the mangled names and the source
(AST generated from the source) which had their definition.
These files should be merged at the end into a global map file:
CTU_EXTDEF_MAP_FILENAME."""
def write_global_map(arch, mangled_ast_pairs):
""" Write (mangled name, ast file) pairs into final file. """
extern_defs_map_file = os.path.join(ctudir, arch,
CTU_EXTDEF_MAP_FILENAME)
with open(extern_defs_map_file, 'w') as out_file:
for mangled_name, ast_file in mangled_ast_pairs:
out_file.write('%s %s\n' % (mangled_name, ast_file))
triple_arches = glob.glob(os.path.join(ctudir, '*'))
for triple_path in triple_arches:
if os.path.isdir(triple_path):
triple_arch = os.path.basename(triple_path)
extdefmap_dir = os.path.join(ctudir, triple_arch,
CTU_TEMP_DEFMAP_FOLDER)
extdef_map_lines = generate_extdef_map_lines(extdefmap_dir)
mangled_ast_pairs = create_global_ctu_extdef_map(extdef_map_lines)
write_global_map(triple_arch, mangled_ast_pairs)
# Remove all temporary files
shutil.rmtree(extdefmap_dir, ignore_errors=True)
def run_analyzer_parallel(args):
""" Runs the analyzer against the given compilation database. """
def exclude(filename, directory):
""" Return true when any excluded directory prefix the filename. """
if not os.path.isabs(filename):
# filename is either absolute or relative to directory. Need to turn
# it to absolute since 'args.excludes' are absolute paths.
filename = os.path.normpath(os.path.join(directory, filename))
return any(re.match(r'^' + exclude_directory, filename)
for exclude_directory in args.excludes)
consts = {
'clang': args.clang,
'output_dir': args.output,
'output_format': args.output_format,
'output_failures': args.output_failures,
'direct_args': analyzer_params(args),
'force_debug': args.force_debug,
'ctu': get_ctu_config_from_args(args)
}
logging.debug('run analyzer against compilation database')
with open(args.cdb, 'r') as handle:
generator = (dict(cmd, **consts)
for cmd in json.load(handle) if not exclude(
cmd['file'], cmd['directory']))
# when verbose output requested execute sequentially
pool = multiprocessing.Pool(1 if args.verbose > 2 else None)
for current in pool.imap_unordered(run, generator):
if current is not None:
# display error message from the static analyzer
for line in current['error_output']:
logging.info(line.rstrip())
pool.close()
pool.join()
def govern_analyzer_runs(args):
""" Governs multiple runs in CTU mode or runs once in normal mode. """
ctu_config = get_ctu_config_from_args(args)
# If we do a CTU collect (1st phase) we remove all previous collection
# data first.
if ctu_config.collect:
shutil.rmtree(ctu_config.dir, ignore_errors=True)
# If the user asked for a collect (1st) and analyze (2nd) phase, we do an
# all-in-one run where we deliberately remove collection data before and
# also after the run. If the user asks only for a single phase data is
# left so multiple analyze runs can use the same data gathered by a single
# collection run.
if ctu_config.collect and ctu_config.analyze:
# CTU strings are coming from args.ctu_dir and extdef_map_cmd,
# so we can leave it empty
args.ctu_phases = CtuConfig(collect=True, analyze=False,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
merge_ctu_extdef_maps(ctu_config.dir)
args.ctu_phases = CtuConfig(collect=False, analyze=True,
dir='', extdef_map_cmd='')
run_analyzer_parallel(args)
shutil.rmtree(ctu_config.dir, ignore_errors=True)
else:
# Single runs (collect or analyze) are launched from here.
run_analyzer_parallel(args)
if ctu_config.collect:
merge_ctu_extdef_maps(ctu_config.dir)
def setup_environment(args):
""" Set up environment for build command to interpose compiler wrapper. """
environment = dict(os.environ)
environment.update(wrapper_environment(args))
environment.update({
'CC': COMPILER_WRAPPER_CC,
'CXX': COMPILER_WRAPPER_CXX,
'ANALYZE_BUILD_CLANG': args.clang if need_analyzer(args.build) else '',
'ANALYZE_BUILD_REPORT_DIR': args.output,
'ANALYZE_BUILD_REPORT_FORMAT': args.output_format,
'ANALYZE_BUILD_REPORT_FAILURES': 'yes' if args.output_failures else '',
'ANALYZE_BUILD_PARAMETERS': ' '.join(analyzer_params(args)),
'ANALYZE_BUILD_FORCE_DEBUG': 'yes' if args.force_debug else '',
'ANALYZE_BUILD_CTU': json.dumps(get_ctu_config_from_args(args))
})
return environment
def analyze_compiler_wrapper_impl(result, execution):
""" Implements analyzer compiler wrapper functionality. """
# don't run analyzer when compilation fails. or when it's not requested.
if result or not os.getenv('ANALYZE_BUILD_CLANG'):
return
# check is it a compilation?
compilation = split_command(execution.cmd)
if compilation is None:
return
# collect the needed parameters from environment, crash when missing
parameters = {
'clang': os.getenv('ANALYZE_BUILD_CLANG'),
'output_dir': os.getenv('ANALYZE_BUILD_REPORT_DIR'),
'output_format': os.getenv('ANALYZE_BUILD_REPORT_FORMAT'),
'output_failures': os.getenv('ANALYZE_BUILD_REPORT_FAILURES'),
'direct_args': os.getenv('ANALYZE_BUILD_PARAMETERS',
'').split(' '),
'force_debug': os.getenv('ANALYZE_BUILD_FORCE_DEBUG'),
'directory': execution.cwd,
'command': [execution.cmd[0], '-c'] + compilation.flags,
'ctu': get_ctu_config_from_json(os.getenv('ANALYZE_BUILD_CTU'))
}
# call static analyzer against the compilation
for source in compilation.files:
parameters.update({'file': source})
logging.debug('analyzer parameters %s', parameters)
current = run(parameters)
# display error message from the static analyzer
if current is not None:
for line in current['error_output']:
logging.info(line.rstrip())
def analyzer_params(args):
""" A group of command line arguments can mapped to command
line arguments of the analyzer. This method generates those. """
result = []
if args.store_model:
result.append('-analyzer-store={0}'.format(args.store_model))
if args.constraints_model:
result.append('-analyzer-constraints={0}'.format(
args.constraints_model))
if args.internal_stats:
result.append('-analyzer-stats')
if args.analyze_headers:
result.append('-analyzer-opt-analyze-headers')
if args.stats:
result.append('-analyzer-checker=debug.Stats')
if args.maxloop:
result.extend(['-analyzer-max-loop', str(args.maxloop)])
if args.output_format:
result.append('-analyzer-output={0}'.format(args.output_format))
if args.analyzer_config:
result.extend(['-analyzer-config', args.analyzer_config])
if args.verbose >= 4:
result.append('-analyzer-display-progress')
if args.plugins:
result.extend(prefix_with('-load', args.plugins))
if args.enable_checker:
checkers = ','.join(args.enable_checker)
result.extend(['-analyzer-checker', checkers])
if args.disable_checker:
checkers = ','.join(args.disable_checker)
result.extend(['-analyzer-disable-checker', checkers])
return prefix_with('-Xclang', result)
def require(required):
""" Decorator for checking the required values in state.
It checks the required attributes in the passed state and stop when
any of those is missing. """
return decorator
def extdef_map_list_src_to_ast(extdef_src_list):
""" Turns textual external definition map list with source files into an
external definition map list with ast files. """
extdef_ast_list = []
for extdef_src_txt in extdef_src_list:
mangled_name, path = extdef_src_txt.split(" ", 1)
# Normalize path on windows as well
path = os.path.splitdrive(path)[1]
# Make relative path out of absolute
path = path[1:] if path[0] == os.sep else path
ast_path = os.path.join("ast", path + ".ast")
extdef_ast_list.append(mangled_name + " " + ast_path)
return extdef_ast_list
# To have good results from static analyzer certain compiler options shall be
# omitted. The compiler flag filtering only affects the static analyzer run.
#
# Keys are the option name, value number of options to skip
IGNORED_FLAGS = {
'-c': 0, # compile option will be overwritten
'-fsyntax-only': 0, # static analyzer option will be overwritten
'-o': 1, # will set up own output file
# flags below are inherited from the perl implementation.
'-g': 0,
'-save-temps': 0,
'-install_name': 1,
'-exported_symbols_list': 1,
'-current_version': 1,
'-compatibility_version': 1,
'-init': 1,
'-e': 1,
'-seg1addr': 1,
'-bundle_loader': 1,
'-multiply_defined': 1,
'-sectorder': 3,
'--param': 1,
'--serialize-diagnostics': 1
}
def classify_parameters(command):
""" Prepare compiler flags (filters some and add others) and take out
language (-x) and architecture (-arch) flags for future processing. """
result = {
'flags': [], # the filtered compiler flags
'arch_list': [], # list of architecture flags
'language': None, # compilation language, None, if not specified
'compiler': compiler_language(command) # 'c' or 'c++'
}
# iterate on the compile options
args = iter(command[1:])
for arg in args:
# take arch flags into a separate basket
if arg == '-arch':
result['arch_list'].append(next(args))
# take language
elif arg == '-x':
result['language'] = next(args)
# parameters which look like source files are not flags
elif re.match(r'^[^-].+', arg) and classify_source(arg):
pass
# ignore some flags
elif arg in IGNORED_FLAGS:
count = IGNORED_FLAGS[arg]
for _ in range(count):
next(args)
# we don't care about extra warnings, but we should suppress ones
# that we don't want to see.
elif re.match(r'^-W.+', arg) and not re.match(r'^-Wno-.+', arg):
pass
# and consider everything else as compilation flag.
else:
result['flags'].append(arg)
return result
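# Illustrative example (assuming classify_source() recognizes '.c' files):
# classify_parameters(['clang', '-c', 'foo.c', '-Wall', '-arch', 'x86_64', '-O2'])
# keeps '-O2' in 'flags', puts 'x86_64' in 'arch_list', drops the ignored '-c',
# drops the '-Wall' warning flag, and skips the source file itself.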
avg_line_length: 39.175525 | max_line_length: 96 | alphanum_fraction: 0.63686
hexsha: dbe1408e84afa0a04966d9f60dcf8a3847bfc25f | size: 1,460 | ext: py | lang: Python
max_stars: tableborder.py | PIRXrav/pyhack | head af5c86fb721053d8a3e819ab772c8144a23b86bf | licenses ["MIT"] | count null | dates null
max_issues: tableborder.py | PIRXrav/pyhack | head af5c86fb721053d8a3e819ab772c8144a23b86bf | licenses ["MIT"] | count null | dates null
max_forks: tableborder.py | PIRXrav/pyhack | head af5c86fb721053d8a3e819ab772c8144a23b86bf | licenses ["MIT"] | count null | dates null
#!/usr/bin/env python3
# pylint: disable=C0103
# pylint: disable=R0902
# pylint: disable=R0903
# pylint: disable=R0913
"""
Defines the TableBorder class
"""
BORDERS = [TableBorder('+', '+', '+',\
'+', '+', '+',\
'+', '+', '+',\
'-', '|'),
TableBorder(u'\u250c', u'\u252C', u'\u2510',\
u'\u251C', u'\u253C', u'\u2524',\
u'\u2514', u'\u2534', u'\u2518',\
u'\u2500', u'\u2502'),
TableBorder(u'\u2554', u'\u2566', u'\u2557',\
u'\u2560', u'\u256C', u'\u2563',\
u'\u255a', u'\u2569', u'\u255d',\
u'\u2550', u'\u2551')
]
avg_line_length: 30.416667 | max_line_length: 56 | alphanum_fraction: 0.469863
hexsha: dbe1aa3b9d736d93221a08965b3b705efeef3804 | size: 216 | ext: py | lang: Python
max_stars: app/urls.py | tkf2019/Vue-Django-SAST-Search | head 385af9819c608ce2d0845ed3e786777ff52b52b3 | licenses ["MIT"] | count null | dates null
max_issues: app/urls.py | tkf2019/Vue-Django-SAST-Search | head 385af9819c608ce2d0845ed3e786777ff52b52b3 | licenses ["MIT"] | count null | dates null
max_forks: app/urls.py | tkf2019/Vue-Django-SAST-Search | head 385af9819c608ce2d0845ed3e786777ff52b52b3 | licenses ["MIT"] | count null | dates null
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^register/', views.register),
url(r'^login/', views.login),
url(r'logout/', views.logout),
url(r'search/', views.search)
]
avg_line_length: 19.636364 | max_line_length: 39 | alphanum_fraction: 0.643519
hexsha: dbe1d984552acfc78008a25befd61632a445f85d | size: 28,508 | ext: py | lang: Python
max_stars: custom_components/hasl/sensor.py | Ziqqo/hasl-platform | head 27386314bf58626538d59c38d89249b07ed9256a | licenses ["Apache-2.0"] | count null | dates null
max_issues: custom_components/hasl/sensor.py | Ziqqo/hasl-platform | head 27386314bf58626538d59c38d89249b07ed9256a | licenses ["Apache-2.0"] | count null | dates null
max_forks: custom_components/hasl/sensor.py | Ziqqo/hasl-platform | head 27386314bf58626538d59c38d89249b07ed9256a | licenses ["Apache-2.0"] | count null | dates null
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Simple service for SL (Storstockholms Lokaltrafik)."""
import datetime
import json
import logging
from datetime import timedelta
import homeassistant.helpers.config_validation as cv
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (ATTR_FRIENDLY_NAME, CONF_SCAN_INTERVAL,
CONF_SENSOR_TYPE, CONF_SENSORS, STATE_OFF,
STATE_ON)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import (async_track_point_in_utc_time,
async_track_utc_time_change,
track_time_interval)
from homeassistant.util import Throttle
from homeassistant.util.dt import now
from hasl import (haslapi, fpapi, tl2api, ri4api, si2api,
HASL_Error, HASL_API_Error, HASL_HTTP_Error)
__version__ = '2.2.0'
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'hasl'
# Keys used in the configuration.
CONF_RI4_KEY = 'ri4key'
CONF_SI2_KEY = 'si2key'
CONF_TL2_KEY = 'tl2key'
CONF_SITEID = 'siteid'
CONF_LINES = 'lines'
CONF_DIRECTION = 'direction'
CONF_ENABLED_SENSOR = 'sensor'
CONF_TIMEWINDOW = 'timewindow'
CONF_SENSORPROPERTY = 'property'
CONF_TRAIN_TYPE = 'train_type'
CONF_TRAFFIC_CLASS = 'traffic_class'
CONF_VERSION = 'version_sensor'
CONF_USE_MINIMIZATION = 'api_minimization'
LIST_SENSOR_TYPES = ['departures', 'status', 'trainlocation', 'comb', 'tl2']
LIST_SENSOR_PROPERTIES = ['min', 'time', 'deviations', 'refresh', 'updated']
LIST_TRAIN_TYPES = ['PT', 'RB', 'TVB', 'SB', 'LB', 'SpvC', 'TB1', 'TB2', 'TB3']
# Default values for configuration.
DEFAULT_INTERVAL = timedelta(minutes=10)
DEFAULT_TIMEWINDOW = 30
DEFAULT_DIRECTION = 0
DEFAULT_SENSORPROPERTY = 'min'
DEFAULT_TRAIN_TYPE = 'PT'
DEFAULT_TRAFFIC_CLASS = ['metro', 'train', 'local', 'tram', 'bus', 'fer']
DEFAULT_SENSORTYPE = 'departures'
DEFAULT_CACHE_FILE = '.storage/haslcache.json'
# Defining the configuration schema.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
# API Keys
vol.Optional(CONF_RI4_KEY): cv.string,
vol.Optional(CONF_SI2_KEY): cv.string,
vol.Optional(CONF_TL2_KEY): cv.string,
vol.Optional(CONF_VERSION, default=False): cv.boolean,
vol.Optional(CONF_USE_MINIMIZATION, default=True): cv.boolean,
vol.Required(CONF_SENSORS, default=[]):
vol.All(cv.ensure_list, [vol.All({
vol.Required(ATTR_FRIENDLY_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE, default=DEFAULT_SENSORTYPE):
vol.In(LIST_SENSOR_TYPES),
vol.Optional(CONF_ENABLED_SENSOR): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL):
vol.Any(cv.time_period, cv.positive_timedelta),
vol.Optional(CONF_SITEID): cv.string,
vol.Optional(CONF_LINES, default=[]):
vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_DIRECTION, default=DEFAULT_DIRECTION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=2)),
vol.Optional(CONF_TIMEWINDOW, default=DEFAULT_TIMEWINDOW):
vol.All(vol.Coerce(int), vol.Range(min=0, max=60)),
vol.Optional(CONF_SENSORPROPERTY, default=DEFAULT_SENSORPROPERTY):
vol.In(LIST_SENSOR_PROPERTIES),
vol.Optional(CONF_TRAFFIC_CLASS, default=DEFAULT_TRAFFIC_CLASS):
vol.All(cv.ensure_list, [vol.In(DEFAULT_TRAFFIC_CLASS)]),
vol.Optional(CONF_TRAIN_TYPE, default=DEFAULT_TRAIN_TYPE):
vol.In(LIST_TRAIN_TYPES)
})]),
}, extra=vol.ALLOW_EXTRA)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the sensors."""
if not hass.data.get(DOMAIN):
hass.data[DOMAIN] = {}
sensors = []
if config[CONF_VERSION]:
sensors.append(SLVersionSensor(hass))
_LOGGER.info("Created version sensor for HASL")
for sensorconf in config[CONF_SENSORS]:
if sensorconf[CONF_SENSOR_TYPE] == 'departures' or \
sensorconf[CONF_SENSOR_TYPE] == 'comb':
sitekey = sensorconf.get(CONF_SITEID)
si2key = config.get(CONF_SI2_KEY)
ri4key = config.get(CONF_RI4_KEY)
if sitekey and ri4key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLDeparturesSensor(
hass,
si2key,
ri4key,
sitekey,
sensorconf.get(CONF_LINES),
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_DIRECTION),
sensorconf.get(CONF_TIMEWINDOW),
sensorconf.get(CONF_SENSORPROPERTY),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created departures sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing site, si2key or ri4key",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'status' or \
sensorconf[CONF_SENSOR_TYPE] == 'tl2':
tl2key = config.get(CONF_TL2_KEY)
if tl2key:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLStatusSensor(
hass,
tl2key,
sensorname,
sensorconf.get(CONF_ENABLED_SENSOR),
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_TRAFFIC_CLASS),
config.get(CONF_USE_MINIMIZATION)
))
_LOGGER.info("Created status sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing tl2key attribute",
sensorconf[ATTR_FRIENDLY_NAME])
if sensorconf[CONF_SENSOR_TYPE] == 'trainlocation':
train_type = sensorconf.get(CONF_TRAIN_TYPE)
if train_type:
sensorname = sensorconf[ATTR_FRIENDLY_NAME]
sensors.append(SLTrainLocationSensor(
hass,
sensorname,
train_type,
sensorconf.get(CONF_SCAN_INTERVAL),
sensorconf.get(CONF_ENABLED_SENSOR),
))
_LOGGER.info("Created train sensor %s...", sensorname)
else:
_LOGGER.error("Sensor %s is missing train_type attribute",
sensorconf[ATTR_FRIENDLY_NAME])
add_devices(sensors)
class SLVersionSensor(Entity):
"""HASL Version Sensor."""
class SLDeparturesSensor(Entity):
"""Departure board for one SL site."""
def __init__(self, hass, si2key, ri4key, siteid,
lines, friendly_name, enabled_sensor,
interval, direction, timewindow, sensorproperty,
minimization):
"""Initialize"""
# The table of resulttypes and the corresponding units of measure.
unit_table = {
'min': 'min',
'time': '',
'deviations': '',
'refresh': '',
'update': '',
}
if si2key:
self._si2key = si2key
self._si2api = si2api(si2key, siteid, '')
self._si2datakey = 'si2_' + si2key + '_' + siteid
self._ri4key = ri4key
self._ri4api = ri4api(ri4key, siteid, 60)
self._ri4datakey = 'ri2_' + ri4key + '_' + siteid
self._hass = hass
self._name = friendly_name
self._lines = lines
self._siteid = siteid
self._enabled_sensor = enabled_sensor
self._sensorproperty = sensorproperty
self._departure_table = []
self._deviations_table = []
self._direction = direction
self._timewindow = timewindow
self._nextdeparture_minutes = '0'
self._nextdeparture_expected = '-'
self._lastupdate = '-'
self._interval = interval
self._unit_of_measure = unit_table.get(self._sensorproperty, 'min')
self._cachefile = hass.config.path(DEFAULT_CACHE_FILE)
self._minimization = minimization
if not hass.data[DOMAIN].get(self._ri4datakey):
hass.data[DOMAIN][self._ri4datakey] = ''
if self._si2key:
if not hass.data[DOMAIN].get(self._si2datakey):
hass.data[DOMAIN][self._si2datakey] = ''
# Setup updating of the sensor.
self.update = Throttle(interval)(self._update)
def parseDepartureTime(self, t):
""" weird time formats from the API,
do some quick and dirty conversions. """
try:
if t == 'Nu':
return 0
s = t.split()
if len(s) > 1 and s[1] == 'min':
return int(s[0])
s = t.split(':')
if len(s) > 1:
rightnow = now(self._hass.config.time_zone)
min = int(s[0]) * 60 + int(s[1]) - (rightnow.hour * 60 +
rightnow.minute)
if min < 0:
min = min + 1440
return min
except Exception:
_LOGGER.warning("Failed to parse departure time (%s) ", t)
return 0
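# Illustrative inputs handled above: 'Nu' -> 0, '5 min' -> 5, and '14:35' ->
# the number of minutes from now until 14:35 local time (wrapping past
# midnight when that clock time has already passed today).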
def getCache(self, key):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
return data.get(key)
except:
return {}
def putCache(self, key, value):
try:
jsonFile = open(self._cachefile, 'r')
data = json.load(jsonFile)
jsonFile.close()
data[key] = value
except:
data = {'' + key + '': value}
jsonFile = open(self._cachefile, 'w')
jsonFile.write(json.dumps(data))
jsonFile.close()
def _update(self):
"""Get the departure board."""
# If using external sensor, get its value.
if self._enabled_sensor is not None:
sensor_state = self._hass.states.get(self._enabled_sensor)
# If we dont have external sensor or it is ON then proceed.
if self._enabled_sensor is None or sensor_state.state \
is STATE_ON:
self._update_ri4()
if self._si2key:
self._update_si2()
self._lastupdate = now(self._hass.config.time_zone)
avg_line_length: 35.369727 | max_line_length: 79 | alphanum_fraction: 0.542058
hexsha: dbe25f137db8fdda41fdc3006d42e7f6d84f1a1d | size: 2,067 | ext: py | lang: Python
max_stars: simbad_tools.py | ishivvers/astro | head ff3f3b9f8ef4013157c277bbb5bf82ac1bd3287d | licenses ["MIT"] | count 1 | 2015-12-06T00:19:35.000Z to 2015-12-06T00:19:35.000Z
max_issues: simbad_tools.py | ishivvers/astro | head ff3f3b9f8ef4013157c277bbb5bf82ac1bd3287d | licenses ["MIT"] | count null | dates null
max_forks: simbad_tools.py | ishivvers/astro | head ff3f3b9f8ef4013157c277bbb5bf82ac1bd3287d | licenses ["MIT"] | count null | dates null
"""
A quick library to deal with searching simbad for info
about a SN and parsing the results.
Author: Isaac Shivvers, [email protected], 2014
example SIMBAD uri query:
http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=sn%201998S
"""
import re
from urllib2 import urlopen
def get_SN_info( name ):
"""
Queries simbad for SN coords, redshift, and host galaxy.
If redshift is not given for SN, attempts to resolve link to
host galaxy and report its redshift.
Returns ( (ra,dec), redshift, host_name, redshift_citation ), with
values of None inserted whenever it cannot resolve the value.
"""
simbad_uri = "http://simbad.u-strasbg.fr/simbad/sim-id?output.format=ASCII&Ident=%s"
regex_coords = "Coordinates\(FK5.+\): .+"
regex_redshift = "Redshift:\s+\d+\.\d+.+"
regex_host = "apparent\s+host\s+galaxy\s+.+?\{(.*?)\}"
result = urlopen( simbad_uri % name.replace(' ','%20') ).read()
rescoords = re.search( regex_coords, result )
resred = re.search( regex_redshift, result )
reshost = re.search( regex_host, result )
try:
cs = rescoords.group().split(':')[1].strip()
ra = cs[:12].strip()
dec = cs[12:].strip()
except:
ra,dec = None,None
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
redshift = None
citation = None
try:
host = reshost.group().split('{')[1].split('}')[0]
except AttributeError:
host = None
if (redshift == None) and (host != None):
# get the redshift from the host galaxy
result = urlopen( simbad_uri % host.replace(' ','%20') ).read()
resred = re.search( regex_redshift, result )
try:
redshift = float(resred.group().strip('Redshift: ').split(' ')[0])
citation = resred.group().split(' ')[-1]
except AttributeError:
pass
return ((ra,dec), redshift, host, citation)
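# Illustrative usage (actual values depend on what SIMBAD returns at query time):
#   (coords, z, host, cite) = get_SN_info('SN 1998S')
# coords is an (ra, dec) pair of sexagesimal strings, z a float or None,
# host the apparent host galaxy name or None, cite the redshift citation or None.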
avg_line_length: 32.296875 | max_line_length: 88 | alphanum_fraction: 0.610063
hexsha: dbe3699b610c3c766074b1340770c91698f9123b | size: 15,647 | ext: py | lang: Python
max_stars: robots/environments.py | StanfordASL/soft-robot-control | head 29ade9b7b952e25e639b42767a4f09c87a0e824a | licenses ["MIT"] | count 5 | 2021-03-07T11:42:11.000Z to 2022-02-28T09:46:05.000Z
max_issues: robots/environments.py | StanfordASL/soft-robot-control | head 29ade9b7b952e25e639b42767a4f09c87a0e824a | licenses ["MIT"] | count null | dates null
max_forks: robots/environments.py | StanfordASL/soft-robot-control | head 29ade9b7b952e25e639b42767a4f09c87a0e824a | licenses ["MIT"] | count 3 | 2021-01-23T11:09:40.000Z to 2022-03-02T11:54:57.000Z
import os
from math import cos
from math import sin
import Sofa.Core
from splib.numerics import Quat, Vec3
from sofacontrol import measurement_models
path = os.path.dirname(os.path.abspath(__file__))
avg_line_length: 49.83121 | max_line_length: 148 | alphanum_fraction: 0.547964
hexsha: dbe3e139f969d2b0c02202b763923425574d8d2e | size: 2,764 | ext: py | lang: Python
max_stars: default.py | SimonPreissner/get-shifty | head aff49220932921c77e419a34ca472b51e0b26b72 | licenses ["MIT"] | count null | dates null
max_issues: default.py | SimonPreissner/get-shifty | head aff49220932921c77e419a34ca472b51e0b26b72 | licenses ["MIT"] | count null | dates null
max_forks: default.py | SimonPreissner/get-shifty | head aff49220932921c77e419a34ca472b51e0b26b72 | licenses ["MIT"] | count null | dates null
"""
This file contains meta information and default configurations of the project
"""
RSC_YEARS = [1660, 1670, 1680, 1690,
1700, 1710, 1720, 1730, 1740, 1750, 1760, 1770, 1780, 1790,
1800, 1810, 1820, 1830, 1840, 1850, 1860, 1870, 1880, 1890,
1900, 1910, 1920]
# cf. Chapter 4.4.1 of the thesis
SPACE_PAIR_SELECTION = [(1740,1750), (1750,1760),
(1680,1710), (1710,1740), (1740,1770), (1770,1800), (1800,1830), (1830,1860), (1860,1890),
(1700,1800), (1800,1900),
(1700,1900)]
COUPLING_CONFIG = { # Alternatives
# parameters passed to the GWOT object
'metric': "cosine", # 'euclidian',
'normalize_vecs': "both", # 'mean', 'whiten', 'whiten_zca'
'normalize_dists': "mean", # 'max', 'median'
'score_type': "coupling", # #TODO fill in the rest of the options in the comments
'adjust': None, # 'csls', ...
'distribs': "uniform", # 'custom', 'zipf'
'share_vocs':False, # True
'size':1000, # 100 is small, 1e4
'max_anchors':100, # used with small couplings (for projection)
# parameters to be passed to the optimizer
'opt_loss_fun': "square_loss", # 'kl_loss'
'opt_entropic': True, # False
'opt_entreg': 5e-4, # stay within the range of e-4 (originally: 1e-4)
'opt_tol': 1e-9, # no limits
'opt_round_g': False, # True
'opt_compute_accuracy': False, # True would require a test dict, but that's not implemented!
'opt_gpu': False, # GPU optimization not tested
# parameters for calling fit()
'fit_maxiter': 300, # no limits; normally converges within 150 iterations
'fit_tol': 1e-9, # no limits
'fit_plot_every': 100000, # normally 20; 'deactivate' the file spam by choosing a large value
'fit_print_every': 1, # no limits
'fit_verbose': True, # False
'fit_save_plots': None # "/my_dir/my_optimizer_plots"
}
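# A minimal usage sketch (hypothetical, not from the repo): start from these
# defaults and override per experiment, e.g.
#   cfg = dict(COUPLING_CONFIG, size=100, fit_maxiter=150)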
DIST_SHAPES = ['uniform', 'zipf', 'custom']
SHIFT_EXPERIMENTS = ["all",
"unsup_bi",
"unsup_mono",
"dis_tech"]
avg_line_length: 52.150943 | max_line_length: 119 | alphanum_fraction: 0.458032
hexsha: dbe3f5f2703d36ffae51f8561d55eb622bc98049 | size: 21,019 | ext: py | lang: Python
max_stars: generate_training_data_drb.py | SimonTopp/Graph-WaveNet | head ef63a80cc397744667a5d27f7c410c10e3e03a4c | licenses ["MIT"] | count null | dates null
max_issues: generate_training_data_drb.py | SimonTopp/Graph-WaveNet | head ef63a80cc397744667a5d27f7c410c10e3e03a4c | licenses ["MIT"] | count null | dates null
max_forks: generate_training_data_drb.py | SimonTopp/Graph-WaveNet | head ef63a80cc397744667a5d27f7c410c10e3e03a4c | licenses ["MIT"] | count null | dates null
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import os
import pandas as pd
import util
import os.path
import yaml
import xarray as xr
import datetime
import pickle
def scale(dataset, std=None, mean=None):
"""
scale the data so it has a standard deviation of 1 and a mean of zero
:param dataset: [xr dataset] input or output data
:param std: [xr dataset] standard deviation if scaling test data with dims
:param mean: [xr dataset] mean if scaling test data with dims
:return: scaled data with original dims
"""
if not isinstance(std, xr.Dataset) or not isinstance(mean, xr.Dataset):
std = dataset.std(skipna=True)
mean = dataset.mean(skipna=True)
# adding small number in case there is a std of zero
scaled = (dataset - mean) / (std + 1e-10)
check_if_finite(std)
check_if_finite(mean)
return scaled, std, mean
def sel_partition_data(dataset, start_dates, end_dates):
"""
select the data from a date range or a set of date ranges
:param dataset: [xr dataset] input or output data with date dimension
:param start_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to start period
(can have multiple discontinuous periods)
:param end_dates: [str or list] fmt: "YYYY-MM-DD"; date(s) to end period
(can have multiple discontinuous periods)
:return: dataset of just those dates
"""
# if it just one date range
if isinstance(start_dates, str):
if isinstance(end_dates, str):
return dataset.sel(date=slice(start_dates, end_dates))
else:
raise ValueError("start_dates is str but not end_date")
# if it's a list of date ranges
elif isinstance(start_dates, list) or isinstance(start_dates, tuple):
if len(start_dates) == len(end_dates):
data_list = []
for i in range(len(start_dates)):
date_slice = slice(start_dates[i], end_dates[i])
data_list.append(dataset.sel(date=date_slice))
return xr.concat(data_list, dim="date")
else:
raise ValueError("start_dates and end_dates must have same length")
else:
raise ValueError("start_dates must be either str, list, or tuple")
def separate_trn_tst(
dataset,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
):
"""
separate the train data from the test data according to the start and end
dates. This assumes your training data is in one continuous block and all
the dates that are not in the training are in the testing.
:param dataset: [xr dataset] input or output data with dims
:param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
train period (can have multiple discontinuous periods)
:param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
period (can have multiple discontinuous periods)
:param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
validation period (can have multiple discontinuous periods)
:param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
validation period (can have multiple discontinuous periods)
:param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
test period (can have multiple discontinuous periods)
:param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
period (can have multiple discontinuous periods)
"""
train = sel_partition_data(dataset, train_start_date, train_end_date)
val = sel_partition_data(dataset, val_start_date, val_end_date)
test = sel_partition_data(dataset, test_start_date, test_end_date)
return train, val, test
def split_into_batches(data_array, seq_len=365, offset=1):
"""
split training data into batches with size of batch_size
:param data_array: [numpy array] array of training data with dims [nseg,
ndates, nfeat]
:param seq_len: [int] length of sequences (i.e., 365)
:param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
the first batch will be 0-365 and the second will be 182-547)
:return: [numpy array] batched data with dims [nbatches, nseg, seq_len
(batch_size), nfeat]
"""
combined = []
for i in range(int(1 / offset)):
start = int(i * offset * seq_len)
idx = np.arange(start=start, stop=data_array.shape[1] + 1, step=seq_len)
split = np.split(data_array, indices_or_sections=idx, axis=1)
# add all but the first and last batch since they will be smaller
combined.extend([s for s in split if s.shape[1] == seq_len])
combined = np.asarray(combined)
return combined
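# A small worked example of the offset batching above: with seq_len=4, offset=0.5
# and 10 dates, the first pass yields windows [0:4] and [4:8] and the second pass
# (start=2) yields [2:6] and [6:10]; partial leading/trailing chunks are discarded.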
def read_multiple_obs(obs_files, x_data):
"""
read and format multiple observation files. we read in the pretrain data to
make sure we have the same indexing.
:param obs_files: [list] list of filenames of observation files
:param pre_train_file: [str] the file of pre_training data
:return: [xr dataset] the observations in the same time
"""
obs = [x_data.sortby(["seg_id_nat", "date"])]
for filename in obs_files:
ds = xr.open_zarr(filename)
obs.append(ds)
if "site_id" in ds.variables:
del ds["site_id"]
obs = xr.merge(obs, join="left")
obs = obs[["temp_c", "discharge_cms"]]
obs = obs.rename(
{"temp_c": "seg_tave_water", "discharge_cms": "seg_outflow"}
)
return obs
def reshape_for_training(data):
"""
reshape the data for training
:param data: training data (either x or y or mask) dims: [nbatch, nseg,
len_seq, nfeat/nout]
:return: reshaped data [nbatch * nseg, len_seq, nfeat/nout]
"""
n_batch, n_seg, seq_len, n_feat = data.shape
return np.reshape(data, [n_batch * n_seg, seq_len, n_feat])
def get_exclude_start_end(exclude_grp):
"""
get the start and end dates for the exclude group
:param exclude_grp: [dict] dictionary representing the exclude group from
the exclude yml file
:return: [tuple of datetime objects] start date, end date
"""
start = exclude_grp.get("start_date")
if start:
start = datetime.datetime.strptime(start, "%Y-%m-%d")
end = exclude_grp.get("end_date")
if end:
end = datetime.datetime.strptime(end, "%Y-%m-%d")
return start, end
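def _example_get_exclude_start_end():
    """Minimal sketch (illustrative only): an exclude group that only defines a
    start date yields (datetime.datetime(2015, 6, 1, 0, 0), None)."""
    return get_exclude_start_end({"start_date": "2015-06-01"})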
def convert_batch_reshape(dataset, seq_len=365, offset=1, y=False, period=np.nan):
    """
    convert xarray dataset into numpy array, swap the axes, batch the array and
    reshape for training
    :param dataset: [xr dataset] data to be batched
    :param seq_len: [int] length of sequences (i.e., 365)
    :param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
    the first batch will be 0-365 and the second will be 182-547)
    :param y: [bool] whether this is output (y) data; if True, `period` is applied
    :param period: [int or NaN] if finite and y is True, keep only the last
    `period` time steps of each sequence
    :return: [numpy array] batched and reshaped dataset with dims
    [nbatch, seq_len, nseg, nfeat]
    """
# convert xr.dataset to numpy array
dataset = dataset.transpose("seg_id_nat", "date")
arr = dataset.to_array().values
# if the dataset is empty, just return it as is
if dataset.date.size == 0:
return arr
# before [nfeat, nseg, ndates]; after [nseg, ndates, nfeat]
# this is the order that the split into batches expects
arr = np.moveaxis(arr, 0, -1)
# batch the data
# after [nbatch, nseg, seq_len, nfeat]
batched = split_into_batches(arr, seq_len=seq_len, offset=offset)
# reshape data
# after [nseq, seq_len, nseg, nfeat]
#reshaped = reshape_for_training(batched)
reshaped = np.moveaxis(batched, [0,1,2,3], [0,2,1,3])
    if y and period is not None and np.isfinite(period):
reshaped = reshaped[:,-period:,...]
return reshaped
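def _example_convert_batch_reshape():
    """Minimal shape sketch (illustrative only; the variable name is a
    placeholder): a toy dataset with 2 segments, 730 daily dates and a single
    variable becomes [nbatch, seq_len, nseg, nfeat] = (2, 365, 2, 1) with the
    default offset."""
    dates = pd.date_range("2000-01-01", periods=730)
    toy = xr.Dataset(
        {"seg_tave_air": (("seg_id_nat", "date"), np.zeros((2, 730)))},
        coords={"seg_id_nat": [1, 2], "date": dates},
    )
    return convert_batch_reshape(toy, seq_len=365, offset=1).shape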
def prep_data(
obs_temper_file,
obs_flow_file,
pretrain_file,
#distfile,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
x_vars=None,
y_vars= ["seg_tave_water", "seg_outflow"],
seq_length = 365,
offset = 1,
period = None,
primary_variable="temp",
#catch_prop_file=None,
#exclude_file=None,
#log_q=False,
out_file=None,
#segs=None,
normalize_y=False,
):
"""
    prepare input and output data for DL model training. read in and process
    data into training, validation, and testing datasets. the training and
    testing data are scaled to have a std of 1 and a mean of zero
    :param obs_temper_file: [str] temperature observations file (zarr)
    :param obs_flow_file: [str] discharge observations file (zarr)
:param pretrain_file: [str] the file with the pretraining data (SNTemp data)
:param distfile: [str] path to the distance matrix .npz file
    :param train_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    train period (can have multiple discontinuous periods)
    :param train_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end train
    period (can have multiple discontinuous periods)
    :param val_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    validation period (can have multiple discontinuous periods)
    :param val_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end
    validation period (can have multiple discontinuous periods)
    :param test_start_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to start
    test period (can have multiple discontinuous periods)
    :param test_end_date: [str or list] fmt: "YYYY-MM-DD"; date(s) to end test
    period (can have multiple discontinuous periods)
    :param x_vars: [list] variables that should be used as input. If None, all
    of the variables will be used
    :param y_vars: [list] variables that should be predicted
    :param seq_length: [int] length of sequences (e.g., 365)
    :param offset: [float] 0-1, how to offset the batches (e.g., 0.5 means that
    the first batch will be 0-365 and the second will be 182-547)
    :param period: [int or NaN] if finite, keep only the last `period` time
    steps of each output sequence
    :param primary_variable: [str] which variable the model should focus on
    'temp' or 'flow'. This determines the order of the variables.
:param catch_prop_file: [str] the path to the catchment properties file. If
left unfilled, the catchment properties will not be included as predictors
:param exclude_file: [str] path to exclude file
:param log_q: [bool] whether or not to take the log of discharge in training
:param out_file: [str] file to where the values will be written
    :returns: [dict] training, validation, and testing data along with the means
    and standard deviations of the training input and output data. Keys include:
        'x_train', 'x_val', 'x_test': batched, scaled input data
        [nbatch, seq_len, nseg, nfeat]
        'x_std', 'x_mean', 'x_cols': input scaling statistics and column names
        'ids_train', 'dates_train' (and '_val'/'_test'): segment ids and dates
        batched to match the x data
        'y_pre_train': batched output data from the pretraining (SNTemp) dataset
        'y_train', 'y_val', 'y_test': batched output observation data (scaled
        and centered only if normalize_y is True)
        'y_std', 'y_mean': standard deviation and mean of the training
        observations [n_out]
"""
ds_pre = xr.open_zarr(pretrain_file)
x_data = ds_pre[x_vars]
# make sure we don't have any weird input values
check_if_finite(x_data)
x_trn, x_val, x_tst = separate_trn_tst(
x_data,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
x_scl, x_std, x_mean = scale(x_data)
x_trn_scl, _, _ = scale(x_trn, std=x_std, mean=x_mean)
x_val_scl, _, _ = scale(x_val, std=x_std, mean=x_mean)
x_tst_scl, _, _ = scale(x_tst, std=x_std, mean=x_mean)
y_obs = read_multiple_obs([obs_temper_file, obs_flow_file], x_data)
y_obs = y_obs[y_vars]
y_pre = ds_pre[y_vars]
y_obs_trn, y_obs_val, y_obs_tst = separate_trn_tst(
y_obs,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
y_pre_trn, y_pre_val, y_pre_tst = separate_trn_tst(
y_pre,
train_start_date,
train_end_date,
val_start_date,
val_end_date,
test_start_date,
test_end_date,
)
if normalize_y:
# scale y training data and get the mean and std
y_obs_trn, y_std, y_mean = scale(y_obs_trn)
y_pre_trn, _, _ = scale(y_pre_trn, y_std, y_mean)
else:
_, y_std, y_mean = scale(y_obs_trn)
data = {
"x_train": convert_batch_reshape(x_trn_scl, offset=offset, seq_len=seq_length),
"x_val": convert_batch_reshape(x_val_scl, offset=offset, seq_len=seq_length),
"x_test": convert_batch_reshape(x_tst_scl, offset=offset, seq_len=seq_length),
"x_std": x_std.to_array().values,
"x_mean": x_mean.to_array().values,
"x_cols": np.array(x_vars),
"ids_train": coord_as_reshaped_array(x_trn, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_train": coord_as_reshaped_array(x_trn, "date", offset=offset, seq_len=seq_length),
"ids_val": coord_as_reshaped_array(x_val, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_val": coord_as_reshaped_array(x_val, "date", offset=offset, seq_len=seq_length),
"ids_test": coord_as_reshaped_array(x_tst, "seg_id_nat", offset=offset, seq_len=seq_length),
"dates_test": coord_as_reshaped_array(x_tst, "date", offset=offset, seq_len=seq_length),
"y_pre_train": convert_batch_reshape(y_pre_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_train": convert_batch_reshape(y_obs_trn, offset=offset, seq_len=seq_length, y=True, period=period),
"y_val": convert_batch_reshape(y_obs_val, offset=offset, seq_len=seq_length, y=True, period=period),
"y_test": convert_batch_reshape(y_obs_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_vars": np.array(y_vars),
'period': np.array([period]),
'y_pre_train_val': convert_batch_reshape(y_pre_val, offset=offset, seq_len=seq_length, y=True, period=period),
'y_pre_train_test': convert_batch_reshape(y_pre_tst, offset=offset, seq_len=seq_length, y=True, period=period),
"y_std": y_std.to_array().values,
"y_mean": y_mean.to_array().values,
}
if out_file:
        if not os.path.isdir(out_file):
os.makedirs(out_file)
'''
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_obs_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_obs_tst'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_obs_val'],
)
'''
np.savez_compressed(os.path.join(out_file,'data.npz'), **data)
return data
def prep_adj_matrix(infile, dist_type, out_file=None):
"""
process adj matrix.
**The resulting matrix is sorted by seg_id_nat **
:param infile:
:param dist_type: [str] type of distance matrix ("upstream", "downstream" or
"updown")
:param out_file:
:return: [numpy array] processed adjacency matrix
"""
adj_matrices = np.load(infile)
adj = adj_matrices[dist_type]
adj_full = sort_dist_matrix(adj, adj_matrices["rowcolnames"])
adj = adj_full[2]
adj = np.where(np.isinf(adj), 0, adj)
adj = -adj
mean_adj = np.mean(adj[adj != 0])
std_adj = np.std(adj[adj != 0])
adj[adj != 0] = adj[adj != 0] - mean_adj
adj[adj != 0] = adj[adj != 0] / std_adj
adj[adj != 0] = 1 / (1 + np.exp(-adj[adj != 0]))
I = np.eye(adj.shape[0])
A_hat = adj.copy() + I
D = np.sum(A_hat, axis=1)
D_inv = D ** -1.0
D_inv = np.diag(D_inv)
A_hat = np.matmul(D_inv, A_hat)
if out_file:
out_dm = [adj_full[0], adj_full[1], A_hat]
with open(out_file+'.pkl', 'wb') as f:
pickle.dump(out_dm, f, protocol=2)
return adj_full[0], adj_full[1], A_hat
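def _example_adjacency_normalization():
    """Minimal numeric sketch (illustrative only) of the normalization done in
    prep_adj_matrix: negate the distances, standardize the non-zero entries,
    squash them with a sigmoid, add self-loops, then row-normalize (D^-1 A_hat)."""
    adj = np.array([[0.0, 2.0], [4.0, 0.0]])
    adj = -adj
    nz = adj != 0
    adj[nz] = (adj[nz] - np.mean(adj[nz])) / np.std(adj[nz])
    adj[nz] = 1 / (1 + np.exp(-adj[nz]))
    A_hat = adj + np.eye(adj.shape[0])
    D_inv = np.diag(np.sum(A_hat, axis=1) ** -1.0)
    return np.matmul(D_inv, A_hat)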
def sort_dist_matrix(mat, row_col_names):
"""
sort the distance matrix by seg_id_nat
:return:
"""
df = pd.DataFrame(mat, columns=row_col_names, index=row_col_names)
df = df.sort_index(axis=0)
df = df.sort_index(axis=1)
sensor_id_to_ind = {}
for i, sensor_id in enumerate(df.columns):
sensor_id_to_ind[sensor_id] = i
return row_col_names, sensor_id_to_ind, df
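def _example_sort_dist_matrix():
    """Minimal sketch (illustrative only): with ids given out of order, the
    matrix is re-sorted by seg_id_nat and the returned dict maps each id to its
    column index after sorting, e.g. {10: 0, 20: 1}."""
    mat = np.array([[0.0, 1.0], [1.0, 0.0]])
    _, id_to_ind, _ = sort_dist_matrix(mat, [20, 10])
    return id_to_ind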
#check = prep_adj_matrix('../../gits/river-dl/DRB_data/distance_matrix.npz', 'upstream', 'data/DRB_gwn_full/adj_mx')
#if __name__ == "__main__":
check2 = prep_data(obs_temper_file='../../gits/river-dl/DRB_data/obs_temp_full',
obs_flow_file='../../gits/river-dl/DRB_data/obs_flow_full',
pretrain_file='../../gits/river-dl/DRB_data/uncal_sntemp_input_output',
train_start_date=['1985-10-01', '2016-10-01'],
train_end_date=['2006-09-30', '2020-09-30'],
val_start_date='2006-10-01',
val_end_date='2016-09-30',
test_start_date=['1980-10-01', '2020-10-01'],
test_end_date=['1985-09-30', '2021-09-30'],
x_vars=["seg_rain", "seg_tave_air", "seginc_swrad", "seg_length", "seginc_potet", "seg_slope", "seg_humid",
"seg_elev"],
y_vars=['seg_tave_water'],
primary_variable='temp',
seq_length=365,
period=np.nan,
offset=1,
out_file = 'data/DRB_gwn_full')
'''f __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--output_dir", type=str, default="data/METR-LA", help="Output directory.")
parser.add_argument("--traffic_df_filename", type=str, default="data/metr-la.h5", help="Raw traffic readings.",)
parser.add_argument("--seq_length_x", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--seq_length_y", type=int, default=12, help="Sequence Length.",)
parser.add_argument("--y_start", type=int, default=1, help="Y pred start", )
parser.add_argument("--dow", action='store_true',)
args = parser.parse_args()
if os.path.exists(args.output_dir):
reply = str(input(f'{args.output_dir} exists. Do you want to overwrite it? (y/n)')).lower().strip()
if reply[0] != 'y': exit
else:
os.makedirs(args.output_dir)
generate_train_val_test(args)
##### Reformat our inputs to match theirs.
df = pd.read_hdf("data/metr-la.h5")
seq_length_x = 12
seq_length_y = 12
y_start = 1
LAtrain = np.load('data/METR-LA/train.npz')
LAtest = np.load('data/METR-LA/test.npz')
LAval = np.load('data/METR-LA/val.npz')
LAtrain['x'].shape
LAtrain['y'].shape
LAtest['x'].shape
LAtest['y'].shape
check = np.moveaxis(data['x_train'], [0,1,2,3], [0,2,1,3])
np.savez_compressed(os.path.join(out_file, 'pre_train.npz'),
x=data['x_train'],
y=data['y_pre_train'])
np.savez_compressed(os.path.join(out_file,'train.npz'),
x=data['x_train'],
y=data['y_pre_train'],
)
np.savez_compressed(os.path.join(out_file, 'test.npz'),
x=data['x_test'],
y=data['y_pre_test'],
)
np.savez_compressed(os.path.join(out_file,'val.npz'),
x=data['x_val'],
y=data['y_pre_val'],
)
'''
| 39.658491 | 120 | 0.63709 |
dbe414ec11ed223b8f3b005ec5b7199d7a73066f
| 3,737 |
py
|
Python
|
Phase-1/Python Basic 1/Day-3.py
|
CodedLadiesInnovateTech/python-challenges
|
22ce26c68fea6c7c243ada831e47c52e27a62127
|
[
"MIT"
] | 11 |
2020-05-11T08:41:21.000Z
|
2022-02-27T08:21:37.000Z
|
Phase-1/Python Basic 1/Day-3.py
|
CodedLadiesInnovateTech/python-challenges
|
22ce26c68fea6c7c243ada831e47c52e27a62127
|
[
"MIT"
] | 9 |
2020-05-12T10:46:06.000Z
|
2020-05-28T17:37:19.000Z
|
Phase-1/Python Basic 1/Day-3.py
|
CodedLadiesInnovateTech/python-challenges
|
22ce26c68fea6c7c243ada831e47c52e27a62127
|
[
"MIT"
] | 44 |
2020-05-10T20:53:32.000Z
|
2021-04-25T18:47:08.000Z
|
"""
1. Write a Python program to print the documentation (syntax, description, etc.) of Python built-in function(s).
Sample function : abs()
Expected Result :
abs(number) -> number
Return the absolute value of the argument.
Tools: help function
2. Write a Python program to print the calendar of a given month and year.
Tools: Use 'calendar' module.
3. Write a Python program to print the following here document.
Sample string :
a string that you "don't" have to escape
This
is a ....... multi-line
heredoc string --------> example
Tools: string formatting
4. Write a Python program to calculate the number of days between two dates.
Sample dates : (2014, 7, 2), (2014, 7, 11)
Expected output : 9 days
Tools: Datetime module, timedelta module
5. Write a Python program to get the volume of a sphere with radius 6.
Tools: input function, math
6. Write a Python program to get the difference between a given number and 17; if the number is greater than 17, return double the absolute difference.
Tools: abs function, input function, math
7. Write a Python program to test whether a number is within 100 of 1000 or 2000.
Tools: maths, input function
8. Write a Python program to calculate the sum of three given numbers; if the values are equal, then return three times their sum.
Tools: math, input function
9. Write a Python program to get a new string from a given string where "Is" has been added to the front. If the given string already begins with "Is" then return the string unchanged.
Tools: input function, string formatting
10. Write a Python program to get a string which is n (non-negative integer) copies of a given string.
Tools: input function, slicing
"""
| 35.932692 | 185 | 0.690393 |
dbe44f6e05680f0d1dad7aaee47f96f07f3de643
| 2,128 |
py
|
Python
|
tests/python/metaclass_inheritance.py
|
gmgunter/pyre
|
e9ff3f8c04661f8b2cd2ba0caded08b6fe8054e2
|
[
"BSD-3-Clause"
] | 25 |
2018-04-23T01:45:39.000Z
|
2021-12-10T06:01:23.000Z
|
tests/python/metaclass_inheritance.py
|
gmgunter/pyre
|
e9ff3f8c04661f8b2cd2ba0caded08b6fe8054e2
|
[
"BSD-3-Clause"
] | 53 |
2018-05-31T04:55:00.000Z
|
2021-10-07T21:41:32.000Z
|
tests/python/metaclass_inheritance.py
|
gmgunter/pyre
|
e9ff3f8c04661f8b2cd2ba0caded08b6fe8054e2
|
[
"BSD-3-Clause"
] | 12 |
2018-04-23T22:50:40.000Z
|
2022-02-20T17:27:23.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2021 all rights reserved
#
#
"""
When a metaclass understands the extra keywords that can be passed during class declaration,
it has to override all these to accommodate the change in signature
"""
def test():
b = base()
d = derived()
return
# main
if __name__ == "__main__":
test()
# end of file
| 25.035294 | 92 | 0.56156 |
dbe6b1bbfa7c8868231f9a2e70cb8975c45626ee
| 434 |
py
|
Python
|
cs101/module8/8-1/chroma1.py
|
idsdlab/basicai_sp21
|
af9acba34c0417fed830de1b61753c50fd303169
|
[
"MIT"
] | 1 |
2021-03-23T16:18:00.000Z
|
2021-03-23T16:18:00.000Z
|
cs101/module8/8-1/chroma1.py
|
idsdlab/basicai_sp21
|
af9acba34c0417fed830de1b61753c50fd303169
|
[
"MIT"
] | null | null | null |
cs101/module8/8-1/chroma1.py
|
idsdlab/basicai_sp21
|
af9acba34c0417fed830de1b61753c50fd303169
|
[
"MIT"
] | null | null | null |
from cs1media import *
import math
statue = load_picture("photos/statue1.jpg")
chroma(statue, (41, 75, 146), 70)
statue.show()
| 20.666667 | 56 | 0.582949 |
dbe72fbf88b8bf3f7bd1a038ff09959ccc113054
| 3,433 |
py
|
Python
|
wfirst_stars/mklc.py
|
RuthAngus/wfirst_stars
|
60989fc56488ac915082e76c3088c6133909985b
|
[
"MIT"
] | null | null | null |
wfirst_stars/mklc.py
|
RuthAngus/wfirst_stars
|
60989fc56488ac915082e76c3088c6133909985b
|
[
"MIT"
] | null | null | null |
wfirst_stars/mklc.py
|
RuthAngus/wfirst_stars
|
60989fc56488ac915082e76c3088c6133909985b
|
[
"MIT"
] | null | null | null |
import numpy as np
import scipy
import scipy.io
import pylab
import numpy
import glob
import pyfits
| 31.787037 | 82 | 0.605884 |
dbe7a0b13a437a6e05e68098ff2efe008a915ee9
| 862 |
py
|
Python
|
bin/sort.py
|
pelavarre/pybashish
|
03f74356fb0a2a0ef7106f09c059fd9b375ce89a
|
[
"CNRI-Python"
] | 4 |
2020-07-10T20:16:13.000Z
|
2022-02-16T02:11:20.000Z
|
bin/sort.py
|
pelavarre/pybashish
|
03f74356fb0a2a0ef7106f09c059fd9b375ce89a
|
[
"CNRI-Python"
] | null | null | null |
bin/sort.py
|
pelavarre/pybashish
|
03f74356fb0a2a0ef7106f09c059fd9b375ce89a
|
[
"CNRI-Python"
] | 2 |
2020-06-24T20:37:36.000Z
|
2020-07-10T20:16:17.000Z
|
#!/usr/bin/env python3
"""
usage: sort.py [-h]
sort lines
options:
-h, --help show this help message and exit
quirks:
sorts tabs as different than spaces
sorts some spaces ending a line as different than none ending a line
examples:
Oh no! No examples disclosed!!
"""
# FIXME: doc -k$N,$N and -n and maybe little else is worth learning
# FIXME: ass -k-1,-1 for negative field indexing
# FIXME: think into the mess at "sort" vs "LC_ALL=C sort"
import sys
import argdoc
if __name__ == "__main__":
main()
# copied from: git clone https://github.com/pelavarre/pybashish.git
| 21.02439 | 70 | 0.678654 |
dbe8f2379002738c1c16e7f2d3cd857e1c75e38f
| 10,561 |
py
|
Python
|
davan/http/service/telldus/tdtool.py
|
davandev/davanserver
|
0be914268c8e34d4092251508bae213cff3ef621
|
[
"MIT"
] | null | null | null |
davan/http/service/telldus/tdtool.py
|
davandev/davanserver
|
0be914268c8e34d4092251508bae213cff3ef621
|
[
"MIT"
] | null | null | null |
davan/http/service/telldus/tdtool.py
|
davandev/davanserver
|
0be914268c8e34d4092251508bae213cff3ef621
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys, getopt, httplib, urllib, json, os
import oauth.oauth as oauth
import datetime
from configobj import ConfigObj
import logging
global logger
logger = logging.getLogger(os.path.basename(__file__))
import davan.util.application_logger as log_manager
#insert your own public_key and private_key
import davan.config.config_creator as config_creator
configuration = config_creator.create()
PUBLIC_KEY = configuration["TELLDUS_PUBLIC_KEY"]
PRIVATE_KEY = configuration["TELLDUS_PRIVATE_KEY"]
TELLSTICK_TURNON = 1
TELLSTICK_TURNOFF = 2
TELLSTICK_BELL = 4
TELLSTICK_DIM = 16
TELLSTICK_UP = 128
TELLSTICK_DOWN = 256
SUPPORTED_METHODS = TELLSTICK_TURNON | TELLSTICK_TURNOFF | TELLSTICK_BELL | TELLSTICK_DIM | TELLSTICK_UP | TELLSTICK_DOWN;
if __name__ == "__main__":
config = ConfigObj(os.environ['HOME'] + '/.config/Telldus/tdtool.conf')
configuration = config_creator.create()
log_manager.start_logging(configuration["LOGFILE_PATH"],loglevel=4)
main(sys.argv[1:])
| 35.921769 | 170 | 0.670675 |
dbe91e2d448902d1659cd842f7d5834596d34306
| 16,286 |
py
|
Python
|
ichnaea/data/export.py
|
rajreet/ichnaea
|
7bd2eaa9568f9004e566b802623299625c29f5ae
|
[
"Apache-2.0"
] | 348 |
2015-01-13T11:48:07.000Z
|
2022-03-31T08:33:07.000Z
|
ichnaea/data/export.py
|
rajreet/ichnaea
|
7bd2eaa9568f9004e566b802623299625c29f5ae
|
[
"Apache-2.0"
] | 1,274 |
2015-01-02T18:15:56.000Z
|
2022-03-23T15:29:08.000Z
|
ichnaea/data/export.py
|
rajreet/ichnaea
|
7bd2eaa9568f9004e566b802623299625c29f5ae
|
[
"Apache-2.0"
] | 149 |
2015-01-04T21:15:07.000Z
|
2021-12-10T06:05:09.000Z
|
from collections import defaultdict
import json
import re
import time
from urllib.parse import urlparse
import uuid
import boto3
import boto3.exceptions
import botocore.exceptions
import markus
import redis.exceptions
import requests
import requests.exceptions
from sqlalchemy import select
import sqlalchemy.exc
from ichnaea.data import _map_content_enabled
from ichnaea.models import (
ApiKey,
BlueObservation,
BlueReport,
BlueShard,
CellObservation,
CellReport,
CellShard,
DataMap,
ExportConfig,
Report,
WifiObservation,
WifiReport,
WifiShard,
)
from ichnaea.models.content import encode_datamap_grid
from ichnaea import util
WHITESPACE = re.compile(r"\s", flags=re.UNICODE)
METRICS = markus.get_metrics()
| 31.933333 | 86 | 0.560666 |
dbe92a131f4e410b11bc7e2f634cf6f5bfadbd7f
| 6,636 |
py
|
Python
|
test/inference_correctness/dcn_multi_hot.py
|
x-y-z/HugeCTR
|
17bf942215df60827ece9dc015af5191ef9219b7
|
[
"Apache-2.0"
] | 130 |
2021-10-11T11:55:28.000Z
|
2022-03-31T21:53:07.000Z
|
test/inference_correctness/dcn_multi_hot.py
|
Teora/HugeCTR
|
c55a63401ad350669ccfcd374aefd7a5fc879ca2
|
[
"Apache-2.0"
] | 72 |
2021-10-09T04:59:09.000Z
|
2022-03-31T11:27:54.000Z
|
test/inference_correctness/dcn_multi_hot.py
|
Teora/HugeCTR
|
c55a63401ad350669ccfcd374aefd7a5fc879ca2
|
[
"Apache-2.0"
] | 29 |
2021-11-03T22:35:01.000Z
|
2022-03-30T13:11:59.000Z
|
import hugectr
from mpi4py import MPI
solver = hugectr.CreateSolver(model_name = "dcn",
max_eval_batches = 1,
batchsize_eval = 16384,
batchsize = 16384,
lr = 0.001,
vvgpu = [[0]],
repeat_dataset = True,
use_mixed_precision = False,
scaler = 1.0,
use_cuda_graph = True,
metrics_spec = {hugectr.MetricsType.AUC: 1.0})
reader = hugectr.DataReaderParams(data_reader_type = hugectr.DataReaderType_t.Norm,
source = ["./dcn_data/file_list.txt"],
eval_source = "./dcn_data/file_list_test.txt",
check_type = hugectr.Check_t.Sum,
num_workers = 16)
optimizer = hugectr.CreateOptimizer(optimizer_type = hugectr.Optimizer_t.Adam,
update_type = hugectr.Update_t.Global,
beta1 = 0.9,
beta2 = 0.999,
epsilon = 0.0001)
model = hugectr.Model(solver, reader, optimizer)
model.add(hugectr.Input(label_dim = 1, label_name = "label",
dense_dim = 13, dense_name = "dense",
data_reader_sparse_param_array =
[hugectr.DataReaderSparseParam("data1", 2, False, 26)]))
model.add(hugectr.SparseEmbedding(embedding_type = hugectr.Embedding_t.DistributedSlotSparseEmbeddingHash,
workspace_size_per_gpu_in_mb = 300,
embedding_vec_size = 16,
combiner = "sum",
sparse_embedding_name = "sparse_embedding1",
bottom_name = "data1",
optimizer = optimizer))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Reshape,
bottom_names = ["sparse_embedding1"],
top_names = ["reshape1"],
leading_dim=416))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["reshape1", "dense"], top_names = ["concat1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Slice,
bottom_names = ["concat1"],
top_names = ["slice11", "slice12"],
ranges=[(0,429),(0,429)]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.MultiCross,
bottom_names = ["slice11"],
top_names = ["multicross1"],
num_layers=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["slice12"],
top_names = ["fc1"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc1"],
top_names = ["relu1"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu1"],
top_names = ["dropout1"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["dropout1"],
top_names = ["fc2"],
num_output=1024))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.ReLU,
bottom_names = ["fc2"],
top_names = ["relu2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Dropout,
bottom_names = ["relu2"],
top_names = ["dropout2"],
dropout_rate=0.5))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.Concat,
bottom_names = ["dropout2", "multicross1"],
top_names = ["concat2"]))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.InnerProduct,
bottom_names = ["concat2"],
top_names = ["fc3"],
num_output=1))
model.add(hugectr.DenseLayer(layer_type = hugectr.Layer_t.BinaryCrossEntropyLoss,
bottom_names = ["fc3", "label"],
top_names = ["loss"]))
model.compile()
model.summary()
model.graph_to_json(graph_config_file = "/dump_infer/dcn.json")
model.fit(max_iter = 2300, display = 200, eval_interval = 2000, snapshot = 2000, snapshot_prefix = "/dump_infer/dcn")
model.export_predictions("/dump_infer/dcn_pred_" + str(2000), "/dump_infer/dcn_label_" + str(2000))
from hugectr.inference import InferenceParams, CreateInferenceSession
import numpy as np
batch_size = 16384
num_batches = 1
data_source = "./dcn_data/file_list_test.txt"
inference_params = InferenceParams(model_name = "dcn",
max_batchsize = batch_size,
hit_rate_threshold = 1.0,
dense_model_file = "/dump_infer/dcn_dense_2000.model",
sparse_model_files = ["/dump_infer/dcn0_sparse_2000.model"],
device_id = 0,
use_gpu_embedding_cache = False,
cache_size_percentage = 1.0,
i64_input_key = False,
use_mixed_precision = False,
use_cuda_graph = True)
inference_session = CreateInferenceSession("/dump_infer/dcn.json", inference_params)
predictions = inference_session.predict(num_batches = num_batches,
source = data_source,
data_reader_type = hugectr.DataReaderType_t.Norm,
check_type = hugectr.Check_t.Sum)
grount_truth = np.loadtxt("/dump_infer/dcn_pred_2000")
diff = predictions-grount_truth
mse = np.mean(diff*diff)
if mse > 1e-3:
    raise RuntimeError("Too large mse between DCN multi hot inference and training: {}".format(mse))
else:
print("DCN multi hot inference results are consistent with those during training, mse: {}".format(mse))
| 56.717949 | 117 | 0.517179 |
dbeaa0d47dcb9a56338a2f94ede14d6545fab66f
| 4,437 |
py
|
Python
|
bindings/pydrake/systems/perception.py
|
RobotLocomotion/drake-python3.7
|
ae397a4c6985262d23e9675b9bf3927c08d027f5
|
[
"BSD-3-Clause"
] | 2 |
2021-02-25T02:01:02.000Z
|
2021-03-17T04:52:04.000Z
|
bindings/pydrake/systems/perception.py
|
RobotLocomotion/drake-python3.7
|
ae397a4c6985262d23e9675b9bf3927c08d027f5
|
[
"BSD-3-Clause"
] | null | null | null |
bindings/pydrake/systems/perception.py
|
RobotLocomotion/drake-python3.7
|
ae397a4c6985262d23e9675b9bf3927c08d027f5
|
[
"BSD-3-Clause"
] | 1 |
2021-06-13T12:05:39.000Z
|
2021-06-13T12:05:39.000Z
|
import numpy as np
from pydrake.common.value import AbstractValue
from pydrake.math import RigidTransform
from pydrake.perception import BaseField, Fields, PointCloud
from pydrake.systems.framework import LeafSystem
| 34.664063 | 79 | 0.626549 |
dbeb68c7ee7ea08f9d92285ea9d761b3aba02878
| 5,115 |
py
|
Python
|
experiments/db_test.py
|
mit-ll/CATAN
|
7cc6f7e8af459c0f6bcf325f0754db1ba5b591ac
|
[
"BSD-3-Clause"
] | 15 |
2015-06-05T20:13:40.000Z
|
2020-12-24T05:16:57.000Z
|
experiments/db_test.py
|
mit-ll/CATAN
|
7cc6f7e8af459c0f6bcf325f0754db1ba5b591ac
|
[
"BSD-3-Clause"
] | 10 |
2016-03-04T23:05:56.000Z
|
2016-05-18T18:14:13.000Z
|
experiments/db_test.py
|
mit-ll/CATAN
|
7cc6f7e8af459c0f6bcf325f0754db1ba5b591ac
|
[
"BSD-3-Clause"
] | 6 |
2015-10-15T19:23:58.000Z
|
2021-06-29T07:36:16.000Z
|
#!/usr/bin/env python
"""
@author Hongyi Hu
2015 Massachusetts Institute of Technology
"""
import argparse
import random
import catan.db
from catan.data import NodeMessage
# test data
STATUS_LIST = ['ok', 'injured', 'deceased']
# nodes
# people
def gen_people(n, db, start_lat, stop_lat, start_long, stop_long):
"""
    Generates n people with a random male/female ratio, aged between 5 and 90 years
"""
assert n > 0
# open male first names file
f = open('dist.male.first','r')
male_first_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# open female first names file
f = open('dist.female.first','r')
female_first_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# open last names file
f = open('dist.all.last','r')
family_names = [name.strip().split()[0] for name in f.readlines()]
f.close()
# generate people
for i in range(n):
catanDBObj = catan.db.CatanDatabaseObject()
# bio
sex = random.randint(0,1)
if sex == 0: # male
catanDBObj.person_bio.name_given = male_first_names[random.randint(0,len(male_first_names)-1)]
catanDBObj.person_bio.sex = 'male'
else: # female
catanDBObj.person_bio.name_given = female_first_names[random.randint(0,len(female_first_names)-1)]
catanDBObj.person_bio.sex = 'female'
catanDBObj.person_bio.name_family = family_names[random.randint(0,len(family_names)-1)]
catanDBObj.person_bio.age = random.randint(5,90)
# message (message, status, location, etc.)
# location
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
catanDBObj.person_message.person_message = 'Hi Mom'
catanDBObj.person_message.status_gps_latitude = lat
catanDBObj.person_message.status_gps_longitude = lng
catanDBObj.person_message.status_gps_accuracy = 0
# status
catanDBObj.person_message.status = STATUS_LIST[random.randint(0,len(STATUS_LIST)-1)]
catanDBObj.person_message.status_location = 'Test status location'
# generate a NodeMessage for the database
# it only cares about the data and source fields, so we can ignore other fields
nmsg = NodeMessage()
nmsg.source = random.randint(0,31) # random node 0-31
nmsg.data = catanDBObj.pack()
db.update_db(nmsg)
# Create some random updates
for i in range(1,n+1):
update = random.randint(0,1)
if update == 0:
catanDBObj = catan.db.CatanDatabaseObject()
catanDBObj.person_id = i
# location
lat = round(random.uniform(start_lat, stop_lat), 6)
lng = round(random.uniform(start_long, stop_long), 6)
catanDBObj.person_message.person_message = 'Location update 1'
catanDBObj.person_message.status_gps_latitude = lat
catanDBObj.person_message.status_gps_longitude = lng
catanDBObj.person_message.status_gps_accuracy = 0
n = NodeMessage()
n.source = random.randint(0,31)
n.data = catanDBObj.pack()
db.update_db(n)
if __name__=='__main__':
populate_db()
| 31.189024 | 111 | 0.634018 |
dbebfc8120396c97668055085f91d5e5e0b1e5af
| 2,183 |
py
|
Python
|
Medium/200.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 6 |
2017-09-25T18:05:50.000Z
|
2019-03-27T00:23:15.000Z
|
Medium/200.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | 1 |
2017-10-29T12:04:41.000Z
|
2018-08-16T18:00:37.000Z
|
Medium/200.py
|
Hellofafar/Leetcode
|
7a459e9742958e63be8886874904e5ab2489411a
|
[
"CNRI-Python"
] | null | null | null |
# ------------------------------
# 200. Number of Islands
#
# Description:
# Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
# 11110
# 11010
# 11000
# 00000
# Answer: 1
#
# Example 2:
# 11000
# 11000
# 00100
# 00011
# Answer: 3
#
# Version: 1.0
# 11/13/17 by Jianfa
# ------------------------------
# ------------------------------
# Summary:
# Copied from discussion.
# The following is another easy understanding idea:
#
# class Solution(object):
# def numIslands(self, grid):
# """
# :type grid: List[List[str]]
# :rtype: int
# """
# if len(grid) == 0: return 0
# m = len(grid)
# n = len(grid[0])
# res = 0
# for i in range(m):
# for j in range(n):
# if grid[i][j] == '1':
# res += 1
# grid[i][j] = '2'
# self.island(i, j, grid, m, n)
# return res
# def island(self, x, y, grid, m, n):
# if x + 1 < m and grid[x+1][y] == '1':
# grid[x+1][y] = '2'
# self.island(x+1,y,grid, m, n)
# if y + 1 < n and grid[x][y+1] == '1':
# grid[x][y+1] = '2'
# self.island(x,y+1,grid, m, n)
# if x -1 >=0 and grid[x-1][y] == '1':
# grid[x-1][y] = '2'
# self.island(x-1,y,grid, m, n)
# if y - 1 >= 0 and grid[x][y-1] == '1':
# grid[x][y-1] = '2'
# self.island(x,y-1,grid, m, n)
| 29.106667 | 258 | 0.44022 |
dbec13a8be9b82963156b2e9e29130d14a7c09eb
| 975 |
py
|
Python
|
tests/formatters/fseventsd.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
tests/formatters/fseventsd.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
tests/formatters/fseventsd.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the fseventsd record event formatter."""
from __future__ import unicode_literals
import unittest
from plaso.formatters import fseventsd
from tests.formatters import test_lib
if __name__ == '__main__':
unittest.main()
| 26.351351 | 67 | 0.756923 |
dbec8e855b885f99aff4e865947ea4c6e652c177
| 2,415 |
py
|
Python
|
train.py
|
Farzin-Negahbani/PathoNet
|
b467a255fb356e64129b7942261e972ae15a2d2b
|
[
"MIT"
] | null | null | null |
train.py
|
Farzin-Negahbani/PathoNet
|
b467a255fb356e64129b7942261e972ae15a2d2b
|
[
"MIT"
] | null | null | null |
train.py
|
Farzin-Negahbani/PathoNet
|
b467a255fb356e64129b7942261e972ae15a2d2b
|
[
"MIT"
] | null | null | null |
from keras.callbacks import ModelCheckpoint,Callback,LearningRateScheduler,TensorBoard
from keras.models import load_model
import random
import numpy as np
from scipy import misc
import gc
from keras.optimizers import Adam
from imageio import imread
from datetime import datetime
import os
import json
import models
from utils import DataLoader, LrPolicy
from config import Config
import argparse
if __name__ == "__main__":
train()
| 41.637931 | 180 | 0.708075 |
dbed1f6b6c1523d648a1c00ecfbe4157990ceba2
| 1,445 |
py
|
Python
|
tests/chainer_tests/functions_tests/array_tests/test_flatten.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | 7 |
2017-05-08T07:02:40.000Z
|
2018-12-02T18:35:39.000Z
|
tests/chainer_tests/functions_tests/array_tests/test_flatten.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
tests/chainer_tests/functions_tests/array_tests/test_flatten.py
|
mingxiaoh/chainer-v3
|
815ff00f5eaf7944d6e8a75662ff64a2fe046a4d
|
[
"BSD-3-Clause"
] | null | null | null |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
testing.run_module(__name__, __file__)
| 26.759259 | 77 | 0.680969 |
dbed5bb534715e304b67cd5a82e6d1e8cca605fa
| 1,693 |
py
|
Python
|
categories/migrations/0001_initial.py
|
snoop2head/exercise_curation_django
|
ba35bd32d8bc203d318cb8b6e0a1722f3aa26eda
|
[
"MIT"
] | 3 |
2020-09-30T04:44:39.000Z
|
2021-07-30T08:20:18.000Z
|
categories/migrations/0001_initial.py
|
snoop2head/exercise_curation_django
|
ba35bd32d8bc203d318cb8b6e0a1722f3aa26eda
|
[
"MIT"
] | 7 |
2021-03-30T13:09:55.000Z
|
2022-01-13T02:33:34.000Z
|
categories/migrations/0001_initial.py
|
snoop2head/exercise_curation_django
|
ba35bd32d8bc203d318cb8b6e0a1722f3aa26eda
|
[
"MIT"
] | 1 |
2022-03-31T12:01:38.000Z
|
2022-03-31T12:01:38.000Z
|
# Generated by Django 3.0.3 on 2020-03-24 09:59
from django.db import migrations, models
import django.db.models.deletion
| 37.622222 | 158 | 0.569403 |
dbed62851d59b2fa6655d17b726752f0c24c4682
| 2,773 |
py
|
Python
|
src/metarl/envs/dm_control/dm_control_env.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | 2 |
2021-02-07T12:14:52.000Z
|
2021-07-29T08:07:22.000Z
|
src/metarl/envs/dm_control/dm_control_env.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
src/metarl/envs/dm_control/dm_control_env.py
|
neurips2020submission11699/metarl
|
ae4825d21478fa1fd0aa6b116941ea40caa152a5
|
[
"MIT"
] | null | null | null |
from dm_control import suite
from dm_control.rl.control import flatten_observation
from dm_env import StepType
import gym
import numpy as np
from metarl.envs import Step
from metarl.envs.dm_control.dm_control_viewer import DmControlViewer
| 33.011905 | 79 | 0.586729 |
dbef5ddea825a12fdea28a38b148d831f47bd566
| 1,446 |
py
|
Python
|
python_modules/lakehouse/lakehouse/snowflake_table.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 3 |
2020-04-28T16:27:33.000Z
|
2020-07-22T07:43:30.000Z
|
python_modules/lakehouse/lakehouse/snowflake_table.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 2 |
2021-05-11T13:36:27.000Z
|
2021-09-03T01:53:11.000Z
|
python_modules/lakehouse/lakehouse/snowflake_table.py
|
vatervonacht/dagster
|
595d78c883ef20618052ac1575fe46cde51fd541
|
[
"Apache-2.0"
] | 1 |
2021-02-21T12:16:47.000Z
|
2021-02-21T12:16:47.000Z
|
from dagster import check
from .house import Lakehouse
from .table import create_lakehouse_table_def
| 26.777778 | 98 | 0.670816 |
dbef871a16cf470112cb22aef95e471326a91ea8
| 1,976 |
py
|
Python
|
pype/plugins/maya/publish/validate_look_no_default_shaders.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_look_no_default_shaders.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
pype/plugins/maya/publish/validate_look_no_default_shaders.py
|
tokejepsen/pype
|
8f2b2b631cc5d3ad93eeb5ad3bc6110d32466ed3
|
[
"MIT"
] | null | null | null |
from maya import cmds
import pyblish.api
import pype.api
import pype.maya.action
| 31.365079 | 76 | 0.619433 |
dbeff0d906fdca4fe34a55902305e858b8a7efb0
| 446 |
py
|
Python
|
data_science_app/app.py
|
Johne-DuChene/data_science_learning_app
|
40bafce85a27155766950806b5b32a2d1f6753c4
|
[
"MIT"
] | null | null | null |
data_science_app/app.py
|
Johne-DuChene/data_science_learning_app
|
40bafce85a27155766950806b5b32a2d1f6753c4
|
[
"MIT"
] | null | null | null |
data_science_app/app.py
|
Johne-DuChene/data_science_learning_app
|
40bafce85a27155766950806b5b32a2d1f6753c4
|
[
"MIT"
] | null | null | null |
from flask import Flask
# initialize the app
app = Flask(__name__)
# execute iris function at /iris route
| 24.777778 | 55 | 0.674888 |
dbf02afc10d2a9ad48452a7e76a2ad7a46bdd3f5
| 10,714 |
py
|
Python
|
vbdiar/scoring/normalization.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 101 |
2017-12-19T21:55:59.000Z
|
2022-03-15T06:56:06.000Z
|
vbdiar/scoring/normalization.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 27 |
2017-07-20T06:10:42.000Z
|
2020-11-22T14:15:16.000Z
|
vbdiar/scoring/normalization.py
|
VarunSrivastava19/VBDiarization
|
2a460b4fc11b3a5ff73d0534cadb182be1a9d882
|
[
"Apache-2.0"
] | 30 |
2017-07-17T08:53:44.000Z
|
2021-05-18T07:37:46.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018 Brno University of Technology FIT
# Author: Jan Profant <[email protected]>
# All Rights Reserved
import os
import logging
import pickle
import multiprocessing
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
from vbdiar.features.segments import get_frames_from_time
from vbdiar.embeddings.embedding import extract_embeddings
from vbdiar.utils import mkdir_p
from vbdiar.utils.utils import Utils
logger = logging.getLogger(__name__)
def process_files(fns, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length, n_jobs=1):
"""
Args:
fns:
speakers_dict:
features_extractor:
embedding_extractor:
audio_dir:
wav_suffix:
in_rttm_dir:
rttm_suffix:
min_length:
n_jobs:
Returns:
"""
kwargs = dict(speakers_dict=speakers_dict, features_extractor=features_extractor,
embedding_extractor=embedding_extractor, audio_dir=audio_dir, wav_suffix=wav_suffix,
in_rttm_dir=in_rttm_dir, rttm_suffix=rttm_suffix, min_length=min_length)
if n_jobs == 1:
ret = _process_files((fns, kwargs))
else:
pool = multiprocessing.Pool(n_jobs)
ret = pool.map(_process_files, ((part, kwargs) for part in Utils.partition(fns, n_jobs)))
return ret
def _process_files(dargs):
"""
Args:
dargs:
Returns:
"""
fns, kwargs = dargs
ret = []
for fn in fns:
ret.append(process_file(file_name=fn, **kwargs))
return ret
def process_file(file_name, speakers_dict, features_extractor, embedding_extractor,
audio_dir, wav_suffix, in_rttm_dir, rttm_suffix, min_length):
""" Extract embeddings for all defined speakers.
Args:
file_name (string_types): path to input audio file
        speakers_dict (dict): dictionary containing all embeddings across speakers
        features_extractor (Any): feature extractor instance
        embedding_extractor (Any): embedding extractor instance
        audio_dir (string_types): path to directory containing audio files
        wav_suffix (string_types): suffix of audio files
        in_rttm_dir (string_types): path to directory containing rttm files
        rttm_suffix (string_types): suffix of rttm files
        min_length (float): minimum segment duration in milliseconds
Returns:
dict: updated dictionary with speakers
"""
logger.info('Processing file `{}`.'.format(file_name.split()[0]))
# extract features from whole audio
features = features_extractor.audio2features(os.path.join(audio_dir, '{}{}'.format(file_name, wav_suffix)))
# process utterances of the speakers
features_dict = {}
with open(f'{os.path.join(in_rttm_dir, file_name)}{rttm_suffix}') as f:
for line in f:
start_time, dur = int(float(line.split()[3]) * 1000), int(float(line.split()[4]) * 1000)
speaker = line.split()[7]
if dur > min_length:
end_time = start_time + dur
start, end = get_frames_from_time(int(start_time)), get_frames_from_time(int(end_time))
if speaker not in features_dict:
features_dict[speaker] = {}
assert 0 <= start < end, \
f'Incorrect timing for extracting features, start: {start}, size: {features.shape[0]}, end: {end}.'
if end >= features.shape[0]:
end = features.shape[0] - 1
features_dict[speaker][(start_time, end_time)] = features[start:end]
for speaker in features_dict:
embedding_set = extract_embeddings(features_dict[speaker], embedding_extractor)
embeddings_long = embedding_set.get_all_embeddings()
if speaker not in speakers_dict.keys():
speakers_dict[speaker] = embeddings_long
else:
speakers_dict[speaker] = np.concatenate((speakers_dict[speaker], embeddings_long), axis=0)
return speakers_dict
| 39.10219 | 119 | 0.614243 |
dbf19789118428ff5f8d3aa59b32b64fa444b8b7
| 984 |
py
|
Python
|
agent_based_models/abm_allelopathy/plot_data.py
|
mattsmart/biomodels
|
237f87489553fa1ebf5c676fab563166dd0c39e9
|
[
"MIT"
] | null | null | null |
agent_based_models/abm_allelopathy/plot_data.py
|
mattsmart/biomodels
|
237f87489553fa1ebf5c676fab563166dd0c39e9
|
[
"MIT"
] | null | null | null |
agent_based_models/abm_allelopathy/plot_data.py
|
mattsmart/biomodels
|
237f87489553fa1ebf5c676fab563166dd0c39e9
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import os
| 30.75 | 111 | 0.650407 |
dbf1c54ca3fd34dfbf7ce18d8d98a14afb9379e4
| 1,056 |
py
|
Python
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1 |
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 2 |
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
azure-mgmt-network/azure/mgmt/network/v2018_10_01/models/virtual_wan_security_providers.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
| 35.2 | 101 | 0.630682 |
dbf2e984865e076aaf055509509eac8230a5a7d1
| 438 |
py
|
Python
|
jsonresume_theme_stackoverflow/filters.py
|
flowgunso/jsonresume-theme-stackoverflow
|
5fcadcf41a93478a09e95d79fd62d8ac3402b33b
|
[
"MIT"
] | null | null | null |
jsonresume_theme_stackoverflow/filters.py
|
flowgunso/jsonresume-theme-stackoverflow
|
5fcadcf41a93478a09e95d79fd62d8ac3402b33b
|
[
"MIT"
] | 4 |
2020-12-29T14:04:48.000Z
|
2021-01-01T20:23:37.000Z
|
jsonresume_theme_stackoverflow/filters.py
|
flowgunso/jsonresume-theme-stackoverflow
|
5fcadcf41a93478a09e95d79fd62d8ac3402b33b
|
[
"MIT"
] | null | null | null |
import datetime
import re
from .exceptions import ObjectIsNotADate
| 24.333333 | 79 | 0.586758 |
dbf349d5a69e925a415de30492c1747e358368f6
| 3,966 |
py
|
Python
|
ipec/data/core.py
|
wwwbbb8510/ippso
|
fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5
|
[
"MIT"
] | 9 |
2018-05-10T01:04:34.000Z
|
2019-06-28T07:47:37.000Z
|
ipec/data/core.py
|
wwwbbb8510/ippso
|
fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5
|
[
"MIT"
] | null | null | null |
ipec/data/core.py
|
wwwbbb8510/ippso
|
fa20d23cd8edba5908e65a0ab0ab990d7ce3d5d5
|
[
"MIT"
] | 2 |
2020-10-12T03:54:30.000Z
|
2021-09-08T14:10:21.000Z
|
import numpy as np
import os
import logging
from sklearn.model_selection import train_test_split
DATASET_ROOT_FOLDER = os.path.abspath('datasets')
| 36.054545 | 150 | 0.614977 |
dbf3d541561ba11217ad33d7f2e880d8ae1b4729
| 1,567 |
py
|
Python
|
FOR/Analisador-completo/main.py
|
lucasf5/Python
|
c5649121e2af42922e2d9c19cec98322e132bdab
|
[
"MIT"
] | 1 |
2021-09-28T13:11:56.000Z
|
2021-09-28T13:11:56.000Z
|
FOR/Analisador-completo/main.py
|
lucasf5/Python
|
c5649121e2af42922e2d9c19cec98322e132bdab
|
[
"MIT"
] | null | null | null |
FOR/Analisador-completo/main.py
|
lucasf5/Python
|
c5649121e2af42922e2d9c19cec98322e132bdab
|
[
"MIT"
] | null | null | null |
# Python Exercise 56: Write a program that reads the name, age, and sex of 4 people. At the end of the program, show: the average age of the group, the name of the oldest man, and how many women are under 20 years old.
mediaidade = ''
nomelista = []
idadelista = []
sexolista = []
homens = []
mulherescommenosde20 = 0
nomedelas = []
# -------------------------------------------------------------------
for i in range(1,5):
print(f'{i} PESSOA')
nome = (input('Seu nome: '))
idade = int(input('Sua idade: '))
sexo = int(input('Sexo? [0]Masculino [1]Feminino: '))
if sexo == 1 and idade < 20:
nomedelas.append(nome)
mulherescommenosde20 += 1
elif sexo == 0:
homens.append(nome)
    # Added all the ages to a list
idadelista.append(idade)
    # Took the average of these ages (first part)
mediaidade = ((sum(idadelista))/4)
    # Added all the names to a list
nomelista.append(nome)
# -------------------------------------------------------------------
# Stored in maximo the largest value found in the list
maximo = max(idadelista)
# Stored in indexidade the INDEX of the largest value
indexidade = idadelista.index(maximo)
# Stored in indexnome the name of the person with the highest age
indexnome = nomelista[indexidade]
# -------------------------------------------------------------------
print(f'A média das idades é: {mediaidade}')
print(f'A pessoa que tem a maior idade, com {maximo}, é essa: {indexnome}')
print(f'As mulheres que possuem menos de 20 anos: {mulherescommenosde20} e são: {nomedelas}')
| 27.982143 | 221 | 0.612636 |
dbf566f5e271a38bb7effb6c5cb9d1b3bcf1fdab
| 22,131 |
py
|
Python
|
test/python/quantum_info/operators/test_operator.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | 2 |
2019-06-28T19:58:42.000Z
|
2019-07-26T05:04:02.000Z
|
test/python/quantum_info/operators/test_operator.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | null | null | null |
test/python/quantum_info/operators/test_operator.py
|
EnriqueL8/qiskit-terra
|
08b801f1f8598c4e44680b4a75c232ed92db0262
|
[
"Apache-2.0"
] | 1 |
2020-01-24T21:01:06.000Z
|
2020-01-24T21:01:06.000Z
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for Operator matrix linear operator class."""
import unittest
import logging
import copy
import numpy as np
from numpy.testing import assert_allclose
import scipy.linalg as la
from qiskit import QiskitError
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit.extensions.standard import HGate, CHGate, CXGate
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.operators.operator import Operator
from qiskit.quantum_info.operators.predicates import matrix_equal
logger = logging.getLogger(__name__)
def simple_circuit_with_measure(self):
"""Return a unitary circuit with measurement."""
qr = QuantumRegister(2)
cr = ClassicalRegister(2)
circ = QuantumCircuit(qr, cr)
circ.h(qr[0])
circ.x(qr[1])
circ.measure(qr, cr)
return circ
class TestOperator(OperatorTestCase):
"""Tests for Operator linear operator class."""
def test_init_array_qubit(self):
"""Test subsystem initialization from N-qubit array."""
# Test automatic inference of qubit subsystems
mat = self.rand_matrix(8, 8)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
op = Operator(mat, input_dims=8, output_dims=8)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (8, 8))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(op.output_dims(), (2, 2, 2))
def test_init_array(self):
"""Test initialization from array."""
mat = np.eye(3)
op = Operator(mat)
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (3, 3))
self.assertEqual(op.input_dims(), (3,))
self.assertEqual(op.output_dims(), (3,))
mat = self.rand_matrix(2 * 3 * 4, 4 * 5)
op = Operator(mat, input_dims=[4, 5], output_dims=[2, 3, 4])
assert_allclose(op.data, mat)
self.assertEqual(op.dim, (4 * 5, 2 * 3 * 4))
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.output_dims(), (2, 3, 4))
def test_init_array_except(self):
"""Test initialization exception from array."""
mat = self.rand_matrix(4, 4)
self.assertRaises(QiskitError, Operator, mat, input_dims=[4, 2])
self.assertRaises(QiskitError, Operator, mat, input_dims=[2, 4])
self.assertRaises(QiskitError, Operator, mat, input_dims=5)
def test_init_operator(self):
"""Test initialization from Operator."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = Operator(op1)
self.assertEqual(op1, op2)
def test_circuit_init(self):
"""Test initialization from a circuit."""
# Test tensor product of 1-qubit gates
circuit = QuantumCircuit(3)
circuit.h(0)
circuit.x(1)
circuit.ry(np.pi / 2, 2)
op = Operator(circuit)
y90 = (1 / np.sqrt(2)) * np.array([[1, -1], [1, 1]])
target = np.kron(y90, np.kron(self.UX, self.UH))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of Controlled-u1 gate
lam = np.pi / 4
circuit = QuantumCircuit(2)
circuit.cu1(lam, 0, 1)
op = Operator(circuit)
target = np.diag([1, 1, 1, np.exp(1j * lam)])
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
# Test decomposition of controlled-H gate
circuit = QuantumCircuit(2)
circuit.ch(0, 1)
op = Operator(circuit)
target = np.kron(self.UI, np.diag([1, 0])) + np.kron(
self.UH, np.diag([0, 1]))
global_phase_equivalent = matrix_equal(
op.data, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_instruction_init(self):
"""Test initialization from a circuit."""
gate = CXGate()
op = Operator(gate).data
target = gate.to_matrix()
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
gate = CHGate()
op = Operator(gate).data
had = HGate().to_matrix()
target = np.kron(had, np.diag([0, 1])) + np.kron(
np.eye(2), np.diag([1, 0]))
global_phase_equivalent = matrix_equal(op, target, ignore_phase=True)
self.assertTrue(global_phase_equivalent)
def test_circuit_init_except(self):
"""Test initialization from circuit with measure raises exception."""
circuit = self.simple_circuit_with_measure()
self.assertRaises(QiskitError, Operator, circuit)
def test_equal(self):
"""Test __eq__ method"""
mat = self.rand_matrix(2, 2, real=True)
self.assertEqual(Operator(np.array(mat, dtype=complex)),
Operator(mat))
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat.tolist()),
Operator(mat))
def test_data(self):
"""Test Operator representation string property."""
mat = self.rand_matrix(2, 2)
op = Operator(mat)
assert_allclose(mat, op.data)
def test_dim(self):
"""Test Operator dim property."""
mat = self.rand_matrix(4, 4)
self.assertEqual(Operator(mat).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[4], output_dims=[4]).dim, (4, 4))
self.assertEqual(Operator(mat, input_dims=[2, 2], output_dims=[2, 2]).dim, (4, 4))
def test_input_dims(self):
"""Test Operator input_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.input_dims(), (4, 5))
self.assertEqual(op.input_dims(qargs=[0, 1]), (4, 5))
self.assertEqual(op.input_dims(qargs=[1, 0]), (5, 4))
self.assertEqual(op.input_dims(qargs=[0]), (4,))
self.assertEqual(op.input_dims(qargs=[1]), (5,))
def test_output_dims(self):
"""Test Operator output_dims method."""
op = Operator(self.rand_matrix(2 * 3 * 4, 4 * 5),
input_dims=[4, 5], output_dims=[2, 3, 4])
self.assertEqual(op.output_dims(), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[0, 1, 2]), (2, 3, 4))
self.assertEqual(op.output_dims(qargs=[2, 1, 0]), (4, 3, 2))
self.assertEqual(op.output_dims(qargs=[2, 0, 1]), (4, 2, 3))
self.assertEqual(op.output_dims(qargs=[0]), (2,))
self.assertEqual(op.output_dims(qargs=[1]), (3,))
self.assertEqual(op.output_dims(qargs=[2]), (4,))
self.assertEqual(op.output_dims(qargs=[0, 2]), (2, 4))
self.assertEqual(op.output_dims(qargs=[2, 0]), (4, 2))
def test_reshape(self):
"""Test Operator reshape method."""
op = Operator(self.rand_matrix(8, 8))
reshaped1 = op.reshape(input_dims=[8], output_dims=[8])
reshaped2 = op.reshape(input_dims=[4, 2], output_dims=[2, 4])
self.assertEqual(op.output_dims(), (2, 2, 2))
self.assertEqual(op.input_dims(), (2, 2, 2))
self.assertEqual(reshaped1.output_dims(), (8,))
self.assertEqual(reshaped1.input_dims(), (8,))
self.assertEqual(reshaped2.output_dims(), (2, 4))
self.assertEqual(reshaped2.input_dims(), (4, 2))
def test_copy(self):
"""Test Operator copy method"""
mat = np.eye(2)
with self.subTest("Deep copy"):
orig = Operator(mat)
cpy = orig.copy()
cpy._data[0, 0] = 0.0
self.assertFalse(cpy == orig)
with self.subTest("Shallow copy"):
orig = Operator(mat)
clone = copy.copy(orig)
clone._data[0, 0] = 0.0
self.assertTrue(clone == orig)
def test_is_unitary(self):
"""Test is_unitary method."""
# X-90 rotation
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
self.assertTrue(Operator(X90).is_unitary())
# Non-unitary should return false
self.assertFalse(Operator([[1, 0], [0, 0]]).is_unitary())
def test_to_operator(self):
"""Test to_operator method."""
op1 = Operator(self.rand_matrix(4, 4))
op2 = op1.to_operator()
self.assertEqual(op1, op2)
def test_conjugate(self):
"""Test conjugate method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_conj = op.conjugate()
self.assertEqual(uni_conj, Operator(matr - 1j * mati))
def test_transpose(self):
"""Test transpose method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_t = op.transpose()
self.assertEqual(uni_t, Operator(matr.T + 1j * mati.T))
def test_adjoint(self):
"""Test adjoint method."""
matr = self.rand_matrix(2, 4, real=True)
mati = self.rand_matrix(2, 4, real=True)
op = Operator(matr + 1j * mati)
uni_adj = op.adjoint()
self.assertEqual(uni_adj, Operator(matr.T - 1j * mati.T))
def test_compose_except(self):
"""Test compose different dimension exception"""
self.assertRaises(QiskitError,
Operator(np.eye(2)).compose,
Operator(np.eye(3)))
self.assertRaises(QiskitError, Operator(np.eye(2)).compose, 2)
def test_compose(self):
"""Test compose method."""
op1 = Operator(self.UX)
op2 = Operator(self.UY)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.compose(op2), targ)
self.assertEqual(op1 @ op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.compose(op1), targ)
self.assertEqual(op2 @ op1, targ)
def test_dot(self):
"""Test dot method."""
op1 = Operator(self.UY)
op2 = Operator(self.UX)
targ = Operator(np.dot(self.UY, self.UX))
self.assertEqual(op1.dot(op2), targ)
self.assertEqual(op1 * op2, targ)
targ = Operator(np.dot(self.UX, self.UY))
self.assertEqual(op2.dot(op1), targ)
self.assertEqual(op2 * op1, targ)
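    # Taken together, test_compose and test_dot pin down the ordering convention:
    # op1.compose(op2) corresponds to the matrix product op2 @ op1 (op1 applied
    # first), while op1.dot(op2) corresponds to op1 @ op2 (op2 applied first).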
def test_compose_front(self):
"""Test front compose method."""
opYX = Operator(self.UY).compose(Operator(self.UX), front=True)
matYX = np.dot(self.UY, self.UX)
self.assertEqual(opYX, Operator(matYX))
opXY = Operator(self.UX).compose(Operator(self.UY), front=True)
matXY = np.dot(self.UX, self.UY)
self.assertEqual(opXY, Operator(matXY))
def test_compose_subsystem(self):
"""Test subsystem compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(np.kron(mat_c, np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op.compose(op3([0, 1, 2])), Operator(targ))
self.assertEqual(op @ op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(np.kron(mat_a, np.kron(mat_b, mat_c)), mat)
self.assertEqual(op.compose(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op @ op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_b, mat_a)), mat)
self.assertEqual(op.compose(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op @ op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(np.kron(mat_a, np.kron(np.eye(2), mat_b)), mat)
self.assertEqual(op.compose(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op @ op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(np.kron(np.eye(4), mat_a), mat)
self.assertEqual(op.compose(op1, qargs=[0]), Operator(targ))
self.assertEqual(op @ op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(np.kron(np.eye(2), np.kron(mat_a, np.eye(2))), mat)
self.assertEqual(op.compose(op1, qargs=[1]), Operator(targ))
self.assertEqual(op @ op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(np.kron(mat_a, np.eye(4)), mat)
self.assertEqual(op.compose(op1, qargs=[2]), Operator(targ))
self.assertEqual(op @ op1([2]), Operator(targ))
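    # The targets above also make the subsystem ordering explicit: qubit 0 is the
    # rightmost (least significant) factor of the Kronecker product, so composing
    # on qargs=[0] pads the single-qubit matrix as np.kron(np.eye(4), mat_a).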
def test_dot_subsystem(self):
"""Test subsystem dot method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op3, qargs=[0, 1, 2]), Operator(targ))
self.assertEqual(op * op3([0, 1, 2]), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.dot(op3, qargs=[2, 1, 0]), Operator(targ))
self.assertEqual(op * op3([2, 1, 0]), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.dot(op2, qargs=[0, 1]), Operator(targ))
self.assertEqual(op * op2([0, 1]), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.dot(op2, qargs=[2, 0]), Operator(targ))
self.assertEqual(op * op2([2, 0]), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.dot(op1, qargs=[0]), Operator(targ))
self.assertEqual(op * op1([0]), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.dot(op1, qargs=[1]), Operator(targ))
self.assertEqual(op * op1([1]), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.dot(op1, qargs=[2]), Operator(targ))
self.assertEqual(op * op1([2]), Operator(targ))
def test_compose_front_subsystem(self):
"""Test subsystem front compose method."""
# 3-qubit operator
mat = self.rand_matrix(8, 8)
mat_a = self.rand_matrix(2, 2)
mat_b = self.rand_matrix(2, 2)
mat_c = self.rand_matrix(2, 2)
op = Operator(mat)
op1 = Operator(mat_a)
op2 = Operator(np.kron(mat_b, mat_a))
op3 = Operator(np.kron(mat_c, np.kron(mat_b, mat_a)))
# op3 qargs=[0, 1, 2]
targ = np.dot(mat, np.kron(mat_c, np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op3, qargs=[0, 1, 2], front=True), Operator(targ))
# op3 qargs=[2, 1, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(mat_b, mat_c)))
self.assertEqual(op.compose(op3, qargs=[2, 1, 0], front=True), Operator(targ))
# op2 qargs=[0, 1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_b, mat_a)))
self.assertEqual(op.compose(op2, qargs=[0, 1], front=True), Operator(targ))
# op2 qargs=[2, 0]
targ = np.dot(mat, np.kron(mat_a, np.kron(np.eye(2), mat_b)))
self.assertEqual(op.compose(op2, qargs=[2, 0], front=True), Operator(targ))
# op1 qargs=[0]
targ = np.dot(mat, np.kron(np.eye(4), mat_a))
self.assertEqual(op.compose(op1, qargs=[0], front=True), Operator(targ))
# op1 qargs=[1]
targ = np.dot(mat, np.kron(np.eye(2), np.kron(mat_a, np.eye(2))))
self.assertEqual(op.compose(op1, qargs=[1], front=True), Operator(targ))
# op1 qargs=[2]
targ = np.dot(mat, np.kron(mat_a, np.eye(4)))
self.assertEqual(op.compose(op1, qargs=[2], front=True), Operator(targ))
def test_power(self):
"""Test power method."""
X90 = la.expm(-1j * 0.5 * np.pi * np.array([[0, 1], [1, 0]]) / 2)
op = Operator(X90)
self.assertEqual(op.power(2), Operator([[0, -1j], [-1j, 0]]))
self.assertEqual(op.power(4), Operator(-1 * np.eye(2)))
self.assertEqual(op.power(8), Operator(np.eye(2)))
def test_expand(self):
"""Test expand method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat1).expand(Operator(mat2))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat2).expand(Operator(mat1))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_tensor(self):
"""Test tensor method."""
mat1 = self.UX
mat2 = np.eye(3, dtype=complex)
mat21 = np.kron(mat2, mat1)
op21 = Operator(mat2).tensor(Operator(mat1))
self.assertEqual(op21.dim, (6, 6))
assert_allclose(op21.data, Operator(mat21).data)
mat12 = np.kron(mat1, mat2)
op12 = Operator(mat1).tensor(Operator(mat2))
self.assertEqual(op12.dim, (6, 6))
assert_allclose(op12.data, Operator(mat12).data)
def test_power_except(self):
"""Test power method raises exceptions."""
op = Operator(self.rand_matrix(3, 3))
# Non-integer power raises error
self.assertRaises(QiskitError, op.power, 0.5)
def test_add(self):
"""Test add method."""
mat1 = self.rand_matrix(4, 4)
mat2 = self.rand_matrix(4, 4)
op1 = Operator(mat1)
op2 = Operator(mat2)
self.assertEqual(op1._add(op2), Operator(mat1 + mat2))
self.assertEqual(op1 + op2, Operator(mat1 + mat2))
self.assertEqual(op1 - op2, Operator(mat1 - mat2))
def test_add_except(self):
"""Test add method raises exceptions."""
op1 = Operator(self.rand_matrix(2, 2))
op2 = Operator(self.rand_matrix(3, 3))
self.assertRaises(QiskitError, op1._add, op2)
def test_multiply(self):
"""Test multiply method."""
mat = self.rand_matrix(4, 4)
val = np.exp(5j)
op = Operator(mat)
self.assertEqual(op._multiply(val), Operator(val * mat))
self.assertEqual(val * op, Operator(val * mat))
def test_multiply_except(self):
"""Test multiply method raises exceptions."""
op = Operator(self.rand_matrix(2, 2))
self.assertRaises(QiskitError, op._multiply, 's')
self.assertRaises(QiskitError, op.__rmul__, 's')
self.assertRaises(QiskitError, op._multiply, op)
self.assertRaises(QiskitError, op.__rmul__, op)
def test_negate(self):
"""Test negate method"""
mat = self.rand_matrix(4, 4)
op = Operator(mat)
self.assertEqual(-op, Operator(-1 * mat))
def test_equiv(self):
"""Test negate method"""
mat = np.diag([1, np.exp(1j * np.pi / 2)])
phase = np.exp(-1j * np.pi / 4)
op = Operator(mat)
self.assertTrue(op.equiv(phase * mat))
self.assertTrue(op.equiv(Operator(phase * mat)))
self.assertFalse(op.equiv(2 * mat))
if __name__ == '__main__':
unittest.main()
| 38.826316 | 90 | 0.592246 |
dbf638d5301c89db06eb944d0fa138829b04d587
| 243 |
py
|
Python
|
pages/feature_modal.py
|
jack-skerrett-bluefruit/Python-ScreenPlay
|
045486bdf441fa3a7a6cde59e7b7e12a7d53fbed
|
[
"MIT"
] | null | null | null |
pages/feature_modal.py
|
jack-skerrett-bluefruit/Python-ScreenPlay
|
045486bdf441fa3a7a6cde59e7b7e12a7d53fbed
|
[
"MIT"
] | null | null | null |
pages/feature_modal.py
|
jack-skerrett-bluefruit/Python-ScreenPlay
|
045486bdf441fa3a7a6cde59e7b7e12a7d53fbed
|
[
"MIT"
] | null | null | null |
from selenium.webdriver.common.by import By
| 30.375 | 82 | 0.699588 |
dbf65821c57bcbfbc7d857dacc3de5d4175d7481
| 1,101 |
py
|
Python
|
liststations.py
|
CrookedY/AirPollutionBot
|
ce79037d6dddd1f297fce04a694b49f8b9a1bfad
|
[
"Apache-2.0"
] | 1 |
2018-08-10T14:06:07.000Z
|
2018-08-10T14:06:07.000Z
|
liststations.py
|
CrookedY/AirPollutionBot
|
ce79037d6dddd1f297fce04a694b49f8b9a1bfad
|
[
"Apache-2.0"
] | 2 |
2017-08-09T11:24:31.000Z
|
2018-03-01T22:50:04.000Z
|
liststations.py
|
CrookedY/AirPollutionBot
|
ce79037d6dddd1f297fce04a694b49f8b9a1bfad
|
[
"Apache-2.0"
] | null | null | null |
from urllib2 import Request, urlopen, URLError
import json
request = Request('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/')
try:
response = urlopen(request)
data = response.read()
except URLError, e:
print 'error:', e
stations= json.loads (data)
#extract out station 2
stations2 = stations [7]
properties = stations2[u'properties']
#extract ID so can be use in link
ID = properties[u'id']
#print ID
url = ('https://uk-air.defra.gov.uk/sos-ukair/api/v1/stations/'+str(ID))
request2 = Request (url)
try:
response = urlopen(request2)
data2 = response.read()
except URLError, e:
print 'error:', e
#contains station properties data. Need to get to timecourse ID
station_prop = data2
station_prop_json= json.loads (station_prop)
#ID is a key in dictionary so need to extract as a key
a= station_prop_json[u'properties'][u'timeseries'].keys()
i=a[0]
url2 =('https://uk-air.defra.gov.uk/sos-ukair/api/v1/timeseries/'+str(i) +'/getData')
request3 = Request(url2)
try:
response = urlopen(request3)
data3 = response.read()
except URLError, e:
print 'error:', e
print data3
| 23.934783 | 85 | 0.719346 |
dbf72e3ca9c0760ca6777e329e27075106a7a7eb
| 73 |
py
|
Python
|
pyfinancials/engine.py
|
kmiller96/PyFinancials
|
73a89b0fd3a3d501b8f8c770f73473e9a2d18fdf
|
[
"MIT"
] | 1 |
2019-02-09T21:28:27.000Z
|
2019-02-09T21:28:27.000Z
|
pyfinancials/engine.py
|
kmiller96/PyFinancials
|
73a89b0fd3a3d501b8f8c770f73473e9a2d18fdf
|
[
"MIT"
] | null | null | null |
pyfinancials/engine.py
|
kmiller96/PyFinancials
|
73a89b0fd3a3d501b8f8c770f73473e9a2d18fdf
|
[
"MIT"
] | null | null | null |
def hello_world():
"""Tests the import."""
return "Hello world!"
| 18.25 | 27 | 0.60274 |
dbf810a25b7c035adf73121054a304443a683fb0
| 748 |
py
|
Python
|
core/migrations/0002_auto_20180702_1913.py
|
mertyildiran/echo
|
805db64e3fa9d31fd3c24390fac2e9bf7c91ad57
|
[
"Apache-2.0"
] | 5 |
2018-07-26T22:48:00.000Z
|
2021-05-02T01:59:51.000Z
|
core/migrations/0002_auto_20180702_1913.py
|
mertyildiran/echo
|
805db64e3fa9d31fd3c24390fac2e9bf7c91ad57
|
[
"Apache-2.0"
] | null | null | null |
core/migrations/0002_auto_20180702_1913.py
|
mertyildiran/echo
|
805db64e3fa9d31fd3c24390fac2e9bf7c91ad57
|
[
"Apache-2.0"
] | 1 |
2018-08-04T14:07:53.000Z
|
2018-08-04T14:07:53.000Z
|
# Generated by Django 2.0.6 on 2018-07-02 19:13
import core.models
from django.db import migrations, models
| 24.933333 | 99 | 0.57754 |
dbf954bdb4324156034054e74ee082a9dc8b9157
| 6,151 |
py
|
Python
|
tests/test_helpers.py
|
ajdavis/aiohttp
|
d5138978f3e82aa82a2f003b00d38112c58a40c1
|
[
"Apache-2.0"
] | 1 |
2021-07-07T06:36:57.000Z
|
2021-07-07T06:36:57.000Z
|
tests/test_helpers.py
|
ajdavis/aiohttp
|
d5138978f3e82aa82a2f003b00d38112c58a40c1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_helpers.py
|
ajdavis/aiohttp
|
d5138978f3e82aa82a2f003b00d38112c58a40c1
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from unittest import mock
from aiohttp import helpers
import datetime
| 29.572115 | 79 | 0.643635 |
dbf96ddd90a5808166e52168da5cadf7b4bb5c35
| 372 |
py
|
Python
|
GenConfigs.py
|
truls/faas-profiler
|
d54ca0d9926f38c693f616ba4d08414aea823f51
|
[
"MIT"
] | null | null | null |
GenConfigs.py
|
truls/faas-profiler
|
d54ca0d9926f38c693f616ba4d08414aea823f51
|
[
"MIT"
] | null | null | null |
GenConfigs.py
|
truls/faas-profiler
|
d54ca0d9926f38c693f616ba4d08414aea823f51
|
[
"MIT"
] | null | null | null |
from os.path import join
FAAS_ROOT="/lhome/trulsas/faas-profiler"
WORKLOAD_SPECS=join(FAAS_ROOT, "specs", "workloads")
#FAAS_ROOT="/home/truls/uni/phd/faas-profiler"
WSK_PATH = "wsk"
OPENWHISK_PATH = "/lhome/trulsas/openwhisk"
#: Location of output data
DATA_DIR = join(FAAS_ROOT, "..", "profiler_results")
SYSTEM_CPU_SET = "0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30"
| 28.615385 | 61 | 0.736559 |
dbfb34040aab0e7552b68ecebacb85f0a1f7a601
| 211 |
py
|
Python
|
Chapter09/calc.py
|
LuisPereda/Learning_Python
|
e89e69346c5584be10d991010f39b59329793ba5
|
[
"MIT"
] | null | null | null |
Chapter09/calc.py
|
LuisPereda/Learning_Python
|
e89e69346c5584be10d991010f39b59329793ba5
|
[
"MIT"
] | null | null | null |
Chapter09/calc.py
|
LuisPereda/Learning_Python
|
e89e69346c5584be10d991010f39b59329793ba5
|
[
"MIT"
] | null | null | null |
print divide(10,0)
print sum1(10,0)
| 13.1875 | 34 | 0.635071 |
dbfbafe90d8d62c542ce03ef8a862cdef8687b06
| 5,288 |
py
|
Python
|
radssh/hostkey.py
|
Eli-Tarrago/radssh
|
ebf3c8f17c3768268dcd483e899a590698de4452
|
[
"BSD-3-Clause"
] | 39 |
2015-05-11T15:06:58.000Z
|
2021-12-29T07:24:23.000Z
|
radssh/hostkey.py
|
Eli-Tarrago/radssh
|
ebf3c8f17c3768268dcd483e899a590698de4452
|
[
"BSD-3-Clause"
] | 45 |
2015-01-05T22:11:18.000Z
|
2021-06-02T03:57:49.000Z
|
radssh/hostkey.py
|
eorochena/radssh
|
b1d1ee5822036445f26a34147452df5c3142caee
|
[
"BSD-3-Clause"
] | 13 |
2015-05-05T12:42:09.000Z
|
2022-03-03T18:09:49.000Z
|
#
# Copyright (c) 2014, 2016, 2018, 2020 LexisNexis Risk Data Management Inc.
#
# This file is part of the RadSSH software package.
#
# RadSSH is free software, released under the Revised BSD License.
# You are permitted to use, modify, and redistribute this software
# according to the Revised BSD License, a copy of which should be
# included with the distribution as file LICENSE.txt
#
'''HostKey Handling Module'''
import os
import threading
import warnings
import paramiko.hostkeys
# Deprecated as of 1.1 - Use known_hosts rewrite instead if using this API
warnings.warn(FutureWarning('RadSSH hostkey module is no longer supported, and will be removed in release 2.0. Port existing code to use radssh.known_hosts instead.'))
verify_mode = CodeMap(
# Different options for handling host key verification
# Listed in decreasing order of security/paranoia
reject=0, # Missing keys are rejected
prompt=1, # Missing keys may be accepted, based on user prompt
accept_new=2, # Missing keys automatically accepted
# After this point, key conflicts no longer hinder connections
# Using these options, you become vulnerable to spoofing and
# intercepted traffic for SSH sessions, and you don't care.
ignore=100, # Turn host key verification OFF
overwrite_blindly=666 # Concentrated evil
)
def printable_fingerprint(k):
'''Convert key fingerprint into OpenSSH printable format'''
fingerprint = k.get_fingerprint()
# Handle Python3 bytes or Python2 8-bit string style...
if isinstance(fingerprint[0], int):
seq = [int(x) for x in fingerprint]
else:
seq = [ord(x) for x in fingerprint]
return ':'.join(['%02x' % x for x in seq])
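# Illustrative output shape only (the digits below are made up, not a real key's
# fingerprint): printable_fingerprint(key) returns a colon-separated hex string
# such as '9f:86:d0:81:88:4c:7d:65:9a:2f:ea:a0:c5:5a:d0:15'.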
| 39.462687 | 167 | 0.625946 |
dbfc3c9f59db54005f9a1ad67dd376c6806f7fa6
| 14,153 |
py
|
Python
|
nuke/pymmh3.py
|
jfpanisset/Cryptomatte
|
d7c71cff17a4e8895eb17520115aa45ff66b8540
|
[
"BSD-3-Clause"
] | 543 |
2016-07-07T15:31:01.000Z
|
2022-03-31T10:58:32.000Z
|
nuke/pymmh3.py
|
jfpanisset/Cryptomatte
|
d7c71cff17a4e8895eb17520115aa45ff66b8540
|
[
"BSD-3-Clause"
] | 143 |
2016-07-07T16:56:38.000Z
|
2022-02-23T23:16:52.000Z
|
nuke/pymmh3.py
|
jfpanisset/Cryptomatte
|
d7c71cff17a4e8895eb17520115aa45ff66b8540
|
[
"BSD-3-Clause"
] | 158 |
2016-07-07T16:41:49.000Z
|
2022-03-21T17:57:28.000Z
|
'''
pymmh3 was written by Fredrik Kihlander and enhanced by Swapnil Gusani, and is placed in the public
domain. The authors hereby disclaim copyright to this source code.
pure python implementation of the murmur3 hash algorithm
https://code.google.com/p/smhasher/wiki/MurmurHash3
This was written for the times when you do not want to compile c-code and install modules,
and you only want a drop-in murmur3 implementation.
As this is purely python it is FAR from performant and if performance is anything that is needed
a proper c-module is suggested!
This module is written to have the same format as mmh3 python package found here for simple conversions:
https://pypi.python.org/pypi/mmh3/2.3.1
'''
import sys as _sys
if (_sys.version_info > (3, 0)):
    # assumed implementation of the text encoder used by hash()/hash128() below:
    # UTF-8 encode text, pass bytes/bytearray through unchanged
    def xencode(x):
        return x if isinstance(x, (bytes, bytearray)) else x.encode()
else:
    def xencode(x):
        # assumed implementation for Python 2: str is already a byte string
        return x
del _sys
def hash( key, seed = 0x0 ):
''' Implements 32bit murmur3 hash. '''
key = bytearray( xencode(key) )
length = len( key )
nblocks = int( length / 4 )
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
# body
for block_start in range( 0, nblocks * 4, 4 ):
# ??? big endian?
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 13 | h1 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 * 5 + 0xe6546b64 ) & 0xFFFFFFFF
# tail
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
unsigned_val = fmix( h1 ^ length )
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -( (unsigned_val ^ 0xFFFFFFFF) + 1 )
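# Minimal usage sketch for the mmh3-style API described in the module docstring
# (results not shown because they depend on the exact input bytes and seed):
#   h = hash('some key')            # signed 32-bit integer
#   h_seeded = hash('some key', 7)  # same input, different seed -> different hash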
def hash128( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. '''
def hash128_x64( key, seed ):
''' Implements 128bit murmur3 hash for x64. '''
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
c1 = 0x87c37b91114253d5
c2 = 0x4cf5ad432745937f
#body
for block_start in range( 0, nblocks * 8, 8 ):
# ??? big endian?
k1 = key[ 2 * block_start + 7 ] << 56 | \
key[ 2 * block_start + 6 ] << 48 | \
key[ 2 * block_start + 5 ] << 40 | \
key[ 2 * block_start + 4 ] << 32 | \
key[ 2 * block_start + 3 ] << 24 | \
key[ 2 * block_start + 2 ] << 16 | \
key[ 2 * block_start + 1 ] << 8 | \
key[ 2 * block_start + 0 ]
k2 = key[ 2 * block_start + 15 ] << 56 | \
key[ 2 * block_start + 14 ] << 48 | \
key[ 2 * block_start + 13 ] << 40 | \
key[ 2 * block_start + 12 ] << 32 | \
key[ 2 * block_start + 11 ] << 24 | \
key[ 2 * block_start + 10 ] << 16 | \
key[ 2 * block_start + 9 ] << 8 | \
key[ 2 * block_start + 8 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( c2 * k1 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
h1 = ( h1 << 27 | h1 >> 37 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = ( h1 * 5 + 0x52dce729 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( c1 * k2 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
h2 = ( h2 << 31 | h2 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h2 * 5 + 0x38495ab5 ) & 0xFFFFFFFFFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
tail_size = length & 15
if tail_size >= 15:
k2 ^= key[ tail_index + 14 ] << 48
if tail_size >= 14:
k2 ^= key[ tail_index + 13 ] << 40
if tail_size >= 13:
k2 ^= key[ tail_index + 12 ] << 32
if tail_size >= 12:
k2 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k2 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k2 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k2 ^= key[ tail_index + 8 ]
if tail_size > 8:
k2 = ( k2 * c2 ) & 0xFFFFFFFFFFFFFFFF
k2 = ( k2 << 33 | k2 >> 31 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k2 = ( k2 * c1 ) & 0xFFFFFFFFFFFFFFFF
h2 ^= k2
if tail_size >= 8:
k1 ^= key[ tail_index + 7 ] << 56
if tail_size >= 7:
k1 ^= key[ tail_index + 6 ] << 48
if tail_size >= 6:
k1 ^= key[ tail_index + 5 ] << 40
if tail_size >= 5:
k1 ^= key[ tail_index + 4 ] << 32
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFFFFFFFFFF
k1 = ( k1 << 31 | k1 >> 33 ) & 0xFFFFFFFFFFFFFFFF # inlined ROTL64
k1 = ( k1 * c2 ) & 0xFFFFFFFFFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h1 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFFFFFFFFFF
return ( h2 << 64 | h1 )
def hash128_x86( key, seed ):
''' Implements 128bit murmur3 hash for x86. '''
length = len( key )
nblocks = int( length / 16 )
h1 = seed
h2 = seed
h3 = seed
h4 = seed
c1 = 0x239b961b
c2 = 0xab0e9789
c3 = 0x38b34ae5
c4 = 0xa1e38b93
#body
for block_start in range( 0, nblocks * 16, 16 ):
k1 = key[ block_start + 3 ] << 24 | \
key[ block_start + 2 ] << 16 | \
key[ block_start + 1 ] << 8 | \
key[ block_start + 0 ]
k2 = key[ block_start + 7 ] << 24 | \
key[ block_start + 6 ] << 16 | \
key[ block_start + 5 ] << 8 | \
key[ block_start + 4 ]
k3 = key[ block_start + 11 ] << 24 | \
key[ block_start + 10 ] << 16 | \
key[ block_start + 9 ] << 8 | \
key[ block_start + 8 ]
k4 = key[ block_start + 15 ] << 24 | \
key[ block_start + 14 ] << 16 | \
key[ block_start + 13 ] << 8 | \
key[ block_start + 12 ]
k1 = ( c1 * k1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( c2 * k1 ) & 0xFFFFFFFF
h1 ^= k1
h1 = ( h1 << 19 | h1 >> 13 ) & 0xFFFFFFFF # inlined ROTL32
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 * 5 + 0x561ccd1b ) & 0xFFFFFFFF
k2 = ( c2 * k2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( c3 * k2 ) & 0xFFFFFFFF
h2 ^= k2
h2 = ( h2 << 17 | h2 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
h2 = ( h2 + h3 ) & 0xFFFFFFFF
h2 = ( h2 * 5 + 0x0bcaa747 ) & 0xFFFFFFFF
k3 = ( c3 * k3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( c4 * k3 ) & 0xFFFFFFFF
h3 ^= k3
h3 = ( h3 << 15 | h3 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
h3 = ( h3 + h4 ) & 0xFFFFFFFF
h3 = ( h3 * 5 + 0x96cd1c35 ) & 0xFFFFFFFF
k4 = ( c4 * k4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( c1 * k4 ) & 0xFFFFFFFF
h4 ^= k4
h4 = ( h4 << 13 | h4 >> 19 ) & 0xFFFFFFFF # inlined ROTL32
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h4 = ( h4 * 5 + 0x32ac3b17 ) & 0xFFFFFFFF
#tail
tail_index = nblocks * 16
k1 = 0
k2 = 0
k3 = 0
k4 = 0
tail_size = length & 15
if tail_size >= 15:
k4 ^= key[ tail_index + 14 ] << 16
if tail_size >= 14:
k4 ^= key[ tail_index + 13 ] << 8
if tail_size >= 13:
k4 ^= key[ tail_index + 12 ]
if tail_size > 12:
k4 = ( k4 * c4 ) & 0xFFFFFFFF
k4 = ( k4 << 18 | k4 >> 14 ) & 0xFFFFFFFF # inlined ROTL32
k4 = ( k4 * c1 ) & 0xFFFFFFFF
h4 ^= k4
if tail_size >= 12:
k3 ^= key[ tail_index + 11 ] << 24
if tail_size >= 11:
k3 ^= key[ tail_index + 10 ] << 16
if tail_size >= 10:
k3 ^= key[ tail_index + 9 ] << 8
if tail_size >= 9:
k3 ^= key[ tail_index + 8 ]
if tail_size > 8:
k3 = ( k3 * c3 ) & 0xFFFFFFFF
k3 = ( k3 << 17 | k3 >> 15 ) & 0xFFFFFFFF # inlined ROTL32
k3 = ( k3 * c4 ) & 0xFFFFFFFF
h3 ^= k3
if tail_size >= 8:
k2 ^= key[ tail_index + 7 ] << 24
if tail_size >= 7:
k2 ^= key[ tail_index + 6 ] << 16
if tail_size >= 6:
k2 ^= key[ tail_index + 5 ] << 8
if tail_size >= 5:
k2 ^= key[ tail_index + 4 ]
if tail_size > 4:
k2 = ( k2 * c2 ) & 0xFFFFFFFF
k2 = ( k2 << 16 | k2 >> 16 ) & 0xFFFFFFFF # inlined ROTL32
k2 = ( k2 * c3 ) & 0xFFFFFFFF
h2 ^= k2
if tail_size >= 4:
k1 ^= key[ tail_index + 3 ] << 24
if tail_size >= 3:
k1 ^= key[ tail_index + 2 ] << 16
if tail_size >= 2:
k1 ^= key[ tail_index + 1 ] << 8
if tail_size >= 1:
k1 ^= key[ tail_index + 0 ]
if tail_size > 0:
k1 = ( k1 * c1 ) & 0xFFFFFFFF
k1 = ( k1 << 15 | k1 >> 17 ) & 0xFFFFFFFF # inlined ROTL32
k1 = ( k1 * c2 ) & 0xFFFFFFFF
h1 ^= k1
#finalization
h1 ^= length
h2 ^= length
h3 ^= length
h4 ^= length
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
h1 = fmix( h1 )
h2 = fmix( h2 )
h3 = fmix( h3 )
h4 = fmix( h4 )
h1 = ( h1 + h2 ) & 0xFFFFFFFF
h1 = ( h1 + h3 ) & 0xFFFFFFFF
h1 = ( h1 + h4 ) & 0xFFFFFFFF
h2 = ( h1 + h2 ) & 0xFFFFFFFF
h3 = ( h1 + h3 ) & 0xFFFFFFFF
h4 = ( h1 + h4 ) & 0xFFFFFFFF
return ( h4 << 96 | h3 << 64 | h2 << 32 | h1 )
key = bytearray( xencode(key) )
if x64arch:
return hash128_x64( key, seed )
else:
return hash128_x86( key, seed )
def hash64( key, seed = 0x0, x64arch = True ):
''' Implements 64bit murmur3 hash. Returns a tuple. '''
hash_128 = hash128( key, seed, x64arch )
unsigned_val1 = hash_128 & 0xFFFFFFFFFFFFFFFF
if unsigned_val1 & 0x8000000000000000 == 0:
signed_val1 = unsigned_val1
else:
signed_val1 = -( (unsigned_val1 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
unsigned_val2 = ( hash_128 >> 64 ) & 0xFFFFFFFFFFFFFFFF
if unsigned_val2 & 0x8000000000000000 == 0:
signed_val2 = unsigned_val2
else:
signed_val2 = -( (unsigned_val2 ^ 0xFFFFFFFFFFFFFFFF) + 1 )
return ( int( signed_val1 ), int( signed_val2 ) )
def hash_bytes( key, seed = 0x0, x64arch = True ):
''' Implements 128bit murmur3 hash. Returns a byte string. '''
hash_128 = hash128( key, seed, x64arch )
bytestring = ''
for i in range(0, 16, 1):
lsbyte = hash_128 & 0xFF
bytestring = bytestring + str( chr( lsbyte ) )
hash_128 = hash_128 >> 8
return bytestring
if __name__ == "__main__":
    import argparse
    import sys
parser = argparse.ArgumentParser( 'pymurmur3', 'pymurmur [options] "string to hash"' )
parser.add_argument( '--seed', type = int, default = 0 )
parser.add_argument( 'strings', default = [], nargs='+')
opts = parser.parse_args()
for str_to_hash in opts.strings:
sys.stdout.write( '"%s" = 0x%08X\n' % ( str_to_hash, hash( str_to_hash ) ) )
| 31.311947 | 104 | 0.464283 |
dbfc56fb832ee5fc9af604dacd2a35c059519b31
| 950 |
py
|
Python
|
bindings/python/tests/test_factory.py
|
pscff/dlite
|
4365d828dcaa1736cc78ff6ed9a65592f198ba25
|
[
"MIT"
] | 10 |
2020-04-08T06:25:27.000Z
|
2022-03-15T06:54:53.000Z
|
bindings/python/tests/test_factory.py
|
pscff/dlite
|
4365d828dcaa1736cc78ff6ed9a65592f198ba25
|
[
"MIT"
] | 117 |
2019-12-16T14:43:41.000Z
|
2022-03-21T19:46:58.000Z
|
bindings/python/tests/test_factory.py
|
pscff/dlite
|
4365d828dcaa1736cc78ff6ed9a65592f198ba25
|
[
"MIT"
] | 5 |
2020-04-15T16:23:29.000Z
|
2021-12-07T08:40:54.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import dlite
thisdir = os.path.abspath(os.path.dirname(__file__))
url = 'json://' + thisdir + '/Person.json'
print('-- create: ExPerson')
ExPerson = dlite.classfactory(Person, url=url)
print('-- create: person1')
person1 = Person('Jack Daniel', 42, ['distilling', 'tasting'])
print('-- create: person2')
person2 = ExPerson('Jack Daniel', 42, ['distilling', 'tasting'])
person2.dlite_inst.save('json', 'persons.json', 'mode=w')
# Print json-representation of person2 using dlite
print(person2.dlite_inst.asjson(indent=2))
person3 = dlite.loadfactory(Person, 'json://persons.json')
person4 = dlite.objectfactory(person1, meta=person2.dlite_meta)
| 25 | 78 | 0.671579 |
dbfcfb1df1954ace1963bc30983b96adb222d711
| 807 |
py
|
Python
|
week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | 1 |
2021-11-03T18:27:37.000Z
|
2021-11-03T18:27:37.000Z
|
week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | null | null | null |
week_11_DS_N_Algorithm/03_Thr_Lecture/실습6_연속 부분 최대합.py
|
bky373/elice-racer-1st
|
ddea8079a1083796ed4f59c38650ff8f4333e6ef
|
[
"FSFAP"
] | 1 |
2021-02-10T15:21:53.000Z
|
2021-02-10T15:21:53.000Z
|
'''
Given n numbers, compute the maximum sum of a contiguous subsequence.
For example, suppose the following 8 numbers are given:
1 2 -4 5 3 -2 9 -10
Contiguous subsequences of this list include, among others,
[1, 2, -4], [5, 3, -2, 9], and [9, -10].
The contiguous subsequence with the largest sum is [5, 3, -2, 9],
and its sum is
5+3+(-2)+9 = 15.
Example input:
1 2 -4 5 3 -2 9 -10
Example output:
15
The number of values does not exceed 100.
'''
import sys
def getSubsum(data) :
'''
    Given a list of n numbers, return the maximum sum of a contiguous subsequence.
'''
dp = [0] * len(data)
dp[0] = data[0]
for i in range(1, len(data)):
dp[i] = max(dp[i-1] + data[i], data[i])
return max(dp)
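# Worked example from the problem statement above:
#   getSubsum([1, 2, -4, 5, 3, -2, 9, -10]) == 15   (subsequence [5, 3, -2, 9])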
def main():
'''
    Read the input numbers and print the maximum contiguous-subsequence sum.
'''
data = [int(x) for x in input().split()]
print(getSubsum(data))
if __name__ == "__main__":
main()
| 17.170213 | 61 | 0.537794 |
dbfd1a602dd992f412e1700c617d5bbf9b239826
| 505 |
py
|
Python
|
tests/test_dns.py
|
jensstein/mockdock
|
4eec294f33d929d361973c1708d2aa856a9900a0
|
[
"MIT"
] | null | null | null |
tests/test_dns.py
|
jensstein/mockdock
|
4eec294f33d929d361973c1708d2aa856a9900a0
|
[
"MIT"
] | 6 |
2020-03-24T16:45:10.000Z
|
2021-02-13T10:03:53.000Z
|
tests/test_dns.py
|
jensstein/mockdock
|
4eec294f33d929d361973c1708d2aa856a9900a0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import unittest
from mockdock import dns
| 38.846154 | 168 | 0.708911 |
dbfd45a1262d4d81ad4ca682e226d591f37c7fd4
| 1,490 |
py
|
Python
|
tests/conftest.py
|
zhongnansu/es-cli
|
e0656c21392e52a8b9cfafa69acfa0c13b743a9c
|
[
"Apache-2.0"
] | 6 |
2019-08-23T18:06:41.000Z
|
2020-05-06T18:26:53.000Z
|
tests/conftest.py
|
zhongnansu/es-cli
|
e0656c21392e52a8b9cfafa69acfa0c13b743a9c
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
zhongnansu/es-cli
|
e0656c21392e52a8b9cfafa69acfa0c13b743a9c
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2019, Amazon Web Services Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
We can define the fixture functions in this file to make them
accessible across multiple test modules.
"""
import os
import pytest
from utils import create_index, delete_index, get_connection
| 29.8 | 73 | 0.769799 |
dbfd8140aa71c6ce6288cd86d96c8cf8754cf91f
| 26,845 |
py
|
Python
|
Cogs/ServerStats.py
|
Damiian1/techwizardshardware
|
97ceafc15036be4136e860076d73d74f1887f041
|
[
"MIT"
] | null | null | null |
Cogs/ServerStats.py
|
Damiian1/techwizardshardware
|
97ceafc15036be4136e860076d73d74f1887f041
|
[
"MIT"
] | null | null | null |
Cogs/ServerStats.py
|
Damiian1/techwizardshardware
|
97ceafc15036be4136e860076d73d74f1887f041
|
[
"MIT"
] | null | null | null |
import asyncio
import discord
from datetime import datetime
from operator import itemgetter
from discord.ext import commands
from Cogs import Nullify
from Cogs import DisplayName
from Cogs import UserTime
from Cogs import Message
| 42.076803 | 254 | 0.537568 |
dbfdaede95b2536399d16c62c421baf5bd420ceb
| 6,688 |
py
|
Python
|
chess_commentary_model/transformers_model/dataset_preprocessing.py
|
Rseiji/TCC-2020
|
da68a49da38adf1bcf590b3028894d7834a28157
|
[
"MIT"
] | null | null | null |
chess_commentary_model/transformers_model/dataset_preprocessing.py
|
Rseiji/TCC-2020
|
da68a49da38adf1bcf590b3028894d7834a28157
|
[
"MIT"
] | 2 |
2020-08-30T22:47:54.000Z
|
2021-03-31T19:58:11.000Z
|
chess_commentary_model/transformers_model/dataset_preprocessing.py
|
Rseiji/TCC-2020
|
da68a49da38adf1bcf590b3028894d7834a28157
|
[
"MIT"
] | null | null | null |
"""Mtodos de preprocessamento de testes individuais
"""
import pandas as pd
import numpy as np
import math
def test_1(df, seed=0):
"""training: balanced; test: balanced
training: 80k (40k 0, 40k 1)
test: 20k (10k 0, 10k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:40000]
df_zeros_training = df_zeros.loc[:40000]
df_ones_test = df_ones.loc[40000:50000]
df_zeros_test = df_zeros.loc[40000:50000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
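# Typical call pattern for the split helpers in this module; df is assumed to be
# a DataFrame with 'comment' and 'label' columns, as used above:
#   sentences_train, sentences_test, labels_train, labels_test = test_1(df, seed=42)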
def test_2(df, seed=0):
"""training: balanced; test: unbalanced
training: 80k (40k 0, 40k 1)
test: 20k (4k 0, 16k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:40000]
df_zeros_training = df_zeros.loc[:40000]
df_ones_test = df_ones.loc[40000:44000]
df_zeros_test = df_zeros.loc[40000:56000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_3(df, seed=0):
"""training: unbalanced; test: unbalanced
training: 80k (16k 1, 64k 0)
test: 20k (4k 1, 16k 0)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:16000]
df_zeros_training = df_zeros.loc[:64000]
df_ones_test = df_ones.loc[16000:20000]
df_zeros_test = df_zeros.loc[64000:80000]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
##################################
## Tests on old dataset
##################################
def test_4(df, seed=0):
""" training: balanced; test: balanced
training: 58k (29k 0, 29k 1)
test: 14.5k (7.25k 0, 7.25k 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:29000]
df_zeros_training = df_zeros.loc[:29000]
df_ones_test = df_ones.loc[29000:36250]
df_zeros_test = df_zeros.loc[29000:36250]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_5(df, seed=0):
"""training: balanced; test: unbalanced
training: 58k (29000 0, 29000 1)
test: 14.5k (12905 0, 1595 1)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:29000]
df_zeros_training = df_zeros.loc[:29000]
df_ones_test = df_ones.loc[29000:30595]
df_zeros_test = df_zeros.loc[29000:41905]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
def test_6(df, seed=0):
"""training: unbalanced; test: unbalanced
training: 58k (6380 1, 51620 0)
test: 14.5k (1595 1, 12905 0)
"""
df_ones = df[df['label'] == 1]
df_zeros = df[df['label'] == 0]
df_ones = df_ones.sample(frac=1, random_state=seed).reset_index(drop=True)
df_zeros = df_zeros.sample(frac=1, random_state=seed).reset_index(drop=True)
df_ones_training = df_ones.loc[:6380]
df_zeros_training = df_zeros.loc[:51620]
df_ones_test = df_ones.loc[6380:7975]
df_zeros_test = df_zeros.loc[51620:64525]
df_training = pd.concat([df_ones_training, df_zeros_training])
df_training = df_training.sample(frac=1).reset_index(drop=True)
df_test = pd.concat([df_ones_test, df_zeros_test])
df_test = df_test.sample(frac=1).reset_index(drop=True)
sentences_train = df_training['comment'].tolist()
sentences_test = df_test['comment'].tolist()
labels_train = df_training['label'].tolist()
labels_test = df_test['label'].tolist()
return sentences_train, sentences_test, labels_train, labels_test
| 36.546448 | 80 | 0.689145 |
dbfdb987e6de76d1f36bf0f8ce7f9d972b1cbaed
| 7,103 |
py
|
Python
|
venv/Lib/site-packages/CoolProp/constants.py
|
kubakoziczak/gasSteamPowerPlant
|
e6c036cc66ee2ff0b3f2fc923d0991bf57295d61
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/CoolProp/constants.py
|
kubakoziczak/gasSteamPowerPlant
|
e6c036cc66ee2ff0b3f2fc923d0991bf57295d61
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/CoolProp/constants.py
|
kubakoziczak/gasSteamPowerPlant
|
e6c036cc66ee2ff0b3f2fc923d0991bf57295d61
|
[
"MIT"
] | null | null | null |
# This file is automatically generated by the generate_constants_module.py script in wrappers/Python.
# DO NOT MODIFY THE CONTENTS OF THIS FILE!
from __future__ import absolute_import
from . import _constants
INVALID_PARAMETER = _constants.INVALID_PARAMETER
igas_constant = _constants.igas_constant
imolar_mass = _constants.imolar_mass
iacentric_factor = _constants.iacentric_factor
irhomolar_reducing = _constants.irhomolar_reducing
irhomolar_critical = _constants.irhomolar_critical
iT_reducing = _constants.iT_reducing
iT_critical = _constants.iT_critical
irhomass_reducing = _constants.irhomass_reducing
irhomass_critical = _constants.irhomass_critical
iP_critical = _constants.iP_critical
iP_reducing = _constants.iP_reducing
iT_triple = _constants.iT_triple
iP_triple = _constants.iP_triple
iT_min = _constants.iT_min
iT_max = _constants.iT_max
iP_max = _constants.iP_max
iP_min = _constants.iP_min
idipole_moment = _constants.idipole_moment
iT = _constants.iT
iP = _constants.iP
iQ = _constants.iQ
iTau = _constants.iTau
iDelta = _constants.iDelta
iDmolar = _constants.iDmolar
iHmolar = _constants.iHmolar
iSmolar = _constants.iSmolar
iCpmolar = _constants.iCpmolar
iCp0molar = _constants.iCp0molar
iCvmolar = _constants.iCvmolar
iUmolar = _constants.iUmolar
iGmolar = _constants.iGmolar
iHelmholtzmolar = _constants.iHelmholtzmolar
iSmolar_residual = _constants.iSmolar_residual
iDmass = _constants.iDmass
iHmass = _constants.iHmass
iSmass = _constants.iSmass
iCpmass = _constants.iCpmass
iCp0mass = _constants.iCp0mass
iCvmass = _constants.iCvmass
iUmass = _constants.iUmass
iGmass = _constants.iGmass
iHelmholtzmass = _constants.iHelmholtzmass
iviscosity = _constants.iviscosity
iconductivity = _constants.iconductivity
isurface_tension = _constants.isurface_tension
iPrandtl = _constants.iPrandtl
ispeed_sound = _constants.ispeed_sound
iisothermal_compressibility = _constants.iisothermal_compressibility
iisobaric_expansion_coefficient = _constants.iisobaric_expansion_coefficient
ifundamental_derivative_of_gas_dynamics = _constants.ifundamental_derivative_of_gas_dynamics
ialphar = _constants.ialphar
idalphar_dtau_constdelta = _constants.idalphar_dtau_constdelta
idalphar_ddelta_consttau = _constants.idalphar_ddelta_consttau
ialpha0 = _constants.ialpha0
idalpha0_dtau_constdelta = _constants.idalpha0_dtau_constdelta
idalpha0_ddelta_consttau = _constants.idalpha0_ddelta_consttau
iBvirial = _constants.iBvirial
iCvirial = _constants.iCvirial
idBvirial_dT = _constants.idBvirial_dT
idCvirial_dT = _constants.idCvirial_dT
iZ = _constants.iZ
iPIP = _constants.iPIP
ifraction_min = _constants.ifraction_min
ifraction_max = _constants.ifraction_max
iT_freeze = _constants.iT_freeze
iGWP20 = _constants.iGWP20
iGWP100 = _constants.iGWP100
iGWP500 = _constants.iGWP500
iFH = _constants.iFH
iHH = _constants.iHH
iPH = _constants.iPH
iODP = _constants.iODP
iPhase = _constants.iPhase
iundefined_parameter = _constants.iundefined_parameter
INPUT_PAIR_INVALID = _constants.INPUT_PAIR_INVALID
QT_INPUTS = _constants.QT_INPUTS
PQ_INPUTS = _constants.PQ_INPUTS
QSmolar_INPUTS = _constants.QSmolar_INPUTS
QSmass_INPUTS = _constants.QSmass_INPUTS
HmolarQ_INPUTS = _constants.HmolarQ_INPUTS
HmassQ_INPUTS = _constants.HmassQ_INPUTS
DmolarQ_INPUTS = _constants.DmolarQ_INPUTS
DmassQ_INPUTS = _constants.DmassQ_INPUTS
PT_INPUTS = _constants.PT_INPUTS
DmassT_INPUTS = _constants.DmassT_INPUTS
DmolarT_INPUTS = _constants.DmolarT_INPUTS
HmolarT_INPUTS = _constants.HmolarT_INPUTS
HmassT_INPUTS = _constants.HmassT_INPUTS
SmolarT_INPUTS = _constants.SmolarT_INPUTS
SmassT_INPUTS = _constants.SmassT_INPUTS
TUmolar_INPUTS = _constants.TUmolar_INPUTS
TUmass_INPUTS = _constants.TUmass_INPUTS
DmassP_INPUTS = _constants.DmassP_INPUTS
DmolarP_INPUTS = _constants.DmolarP_INPUTS
HmassP_INPUTS = _constants.HmassP_INPUTS
HmolarP_INPUTS = _constants.HmolarP_INPUTS
PSmass_INPUTS = _constants.PSmass_INPUTS
PSmolar_INPUTS = _constants.PSmolar_INPUTS
PUmass_INPUTS = _constants.PUmass_INPUTS
PUmolar_INPUTS = _constants.PUmolar_INPUTS
HmassSmass_INPUTS = _constants.HmassSmass_INPUTS
HmolarSmolar_INPUTS = _constants.HmolarSmolar_INPUTS
SmassUmass_INPUTS = _constants.SmassUmass_INPUTS
SmolarUmolar_INPUTS = _constants.SmolarUmolar_INPUTS
DmassHmass_INPUTS = _constants.DmassHmass_INPUTS
DmolarHmolar_INPUTS = _constants.DmolarHmolar_INPUTS
DmassSmass_INPUTS = _constants.DmassSmass_INPUTS
DmolarSmolar_INPUTS = _constants.DmolarSmolar_INPUTS
DmassUmass_INPUTS = _constants.DmassUmass_INPUTS
DmolarUmolar_INPUTS = _constants.DmolarUmolar_INPUTS
FLUID_TYPE_PURE = _constants.FLUID_TYPE_PURE
FLUID_TYPE_PSEUDOPURE = _constants.FLUID_TYPE_PSEUDOPURE
FLUID_TYPE_REFPROP = _constants.FLUID_TYPE_REFPROP
FLUID_TYPE_INCOMPRESSIBLE_LIQUID = _constants.FLUID_TYPE_INCOMPRESSIBLE_LIQUID
FLUID_TYPE_INCOMPRESSIBLE_SOLUTION = _constants.FLUID_TYPE_INCOMPRESSIBLE_SOLUTION
FLUID_TYPE_UNDEFINED = _constants.FLUID_TYPE_UNDEFINED
iphase_liquid = _constants.iphase_liquid
iphase_supercritical = _constants.iphase_supercritical
iphase_supercritical_gas = _constants.iphase_supercritical_gas
iphase_supercritical_liquid = _constants.iphase_supercritical_liquid
iphase_critical_point = _constants.iphase_critical_point
iphase_gas = _constants.iphase_gas
iphase_twophase = _constants.iphase_twophase
iphase_unknown = _constants.iphase_unknown
iphase_not_imposed = _constants.iphase_not_imposed
NORMALIZE_GAS_CONSTANTS = _constants.NORMALIZE_GAS_CONSTANTS
CRITICAL_WITHIN_1UK = _constants.CRITICAL_WITHIN_1UK
CRITICAL_SPLINES_ENABLED = _constants.CRITICAL_SPLINES_ENABLED
SAVE_RAW_TABLES = _constants.SAVE_RAW_TABLES
ALTERNATIVE_TABLES_DIRECTORY = _constants.ALTERNATIVE_TABLES_DIRECTORY
ALTERNATIVE_REFPROP_PATH = _constants.ALTERNATIVE_REFPROP_PATH
ALTERNATIVE_REFPROP_HMX_BNC_PATH = _constants.ALTERNATIVE_REFPROP_HMX_BNC_PATH
ALTERNATIVE_REFPROP_LIBRARY_PATH = _constants.ALTERNATIVE_REFPROP_LIBRARY_PATH
REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS = _constants.REFPROP_DONT_ESTIMATE_INTERACTION_PARAMETERS
REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS = _constants.REFPROP_IGNORE_ERROR_ESTIMATED_INTERACTION_PARAMETERS
REFPROP_USE_GERG = _constants.REFPROP_USE_GERG
REFPROP_USE_PENGROBINSON = _constants.REFPROP_USE_PENGROBINSON
MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB = _constants.MAXIMUM_TABLE_DIRECTORY_SIZE_IN_GB
DONT_CHECK_PROPERTY_LIMITS = _constants.DONT_CHECK_PROPERTY_LIMITS
HENRYS_LAW_TO_GENERATE_VLE_GUESSES = _constants.HENRYS_LAW_TO_GENERATE_VLE_GUESSES
PHASE_ENVELOPE_STARTING_PRESSURE_PA = _constants.PHASE_ENVELOPE_STARTING_PRESSURE_PA
R_U_CODATA = _constants.R_U_CODATA
VTPR_UNIFAC_PATH = _constants.VTPR_UNIFAC_PATH
SPINODAL_MINIMUM_DELTA = _constants.SPINODAL_MINIMUM_DELTA
OVERWRITE_FLUIDS = _constants.OVERWRITE_FLUIDS
OVERWRITE_DEPARTURE_FUNCTION = _constants.OVERWRITE_DEPARTURE_FUNCTION
OVERWRITE_BINARY_INTERACTION = _constants.OVERWRITE_BINARY_INTERACTION
USE_GUESSES_IN_PROPSSI = _constants.USE_GUESSES_IN_PROPSSI
ASSUME_CRITICAL_POINT_STABLE = _constants.ASSUME_CRITICAL_POINT_STABLE
VTPR_ALWAYS_RELOAD_LIBRARY = _constants.VTPR_ALWAYS_RELOAD_LIBRARY
FLOAT_PUNCTUATION = _constants.FLOAT_PUNCTUATION
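# Sketch of how these pair constants are typically consumed by CoolProp's
# low-level interface (assumes the standard AbstractState API; the fluid name and
# state values are illustrative):
#   from CoolProp.CoolProp import AbstractState
#   from CoolProp.constants import PT_INPUTS
#   state = AbstractState("HEOS", "Water")
#   state.update(PT_INPUTS, 101325.0, 300.0)  # pressure [Pa], temperature [K]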
| 44.672956 | 120 | 0.887653 |
dbfe9381e4f6dcc57fd5c5d02265d7f565b40315
| 2,857 |
py
|
Python
|
torch_datasets/samplers/balanced_batch_sampler.py
|
mingruimingrui/torch-datasets
|
2640b8c4fa82156e68e617fc545a546b4e08dc4e
|
[
"MIT"
] | null | null | null |
torch_datasets/samplers/balanced_batch_sampler.py
|
mingruimingrui/torch-datasets
|
2640b8c4fa82156e68e617fc545a546b4e08dc4e
|
[
"MIT"
] | null | null | null |
torch_datasets/samplers/balanced_batch_sampler.py
|
mingruimingrui/torch-datasets
|
2640b8c4fa82156e68e617fc545a546b4e08dc4e
|
[
"MIT"
] | null | null | null |
import random
import torch.utils.data.sampler
| 38.608108 | 147 | 0.637382 |
dbfe9b7374d292dd3a07ffc92b4ebb9e7af2ac5d
| 1,416 |
py
|
Python
|
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 1,664 |
2015-01-03T09:35:21.000Z
|
2022-03-31T04:55:24.000Z
|
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 3,018 |
2015-02-19T20:16:10.000Z
|
2021-11-13T20:47:48.000Z
|
ambari-common/src/main/python/resource_management/libraries/functions/get_bare_principal.py
|
likenamehaojie/Apache-Ambari-ZH
|
5973025bd694cdbb4b49fb4c4e0d774782811ff6
|
[
"Apache-2.0"
] | 1,673 |
2015-01-06T14:14:42.000Z
|
2022-03-31T07:22:30.000Z
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import re
__all__ = ["get_bare_principal"]
def get_bare_principal(normalized_principal_name):
"""
  Given a normalized principal name (e.g. nimbus/host@REALM) returns just the
primary component (nimbus)
:param normalized_principal_name: a string containing the principal name to process
:return: a string containing the primary component value or None if not valid
"""
bare_principal = None
if normalized_principal_name:
match = re.match(r"([^/@]+)(?:/[^@])?(?:@.*)?", normalized_principal_name)
if match:
bare_principal = match.group(1)
return bare_principal
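# Illustrative examples based on the docstring above:
#   get_bare_principal('nimbus/host@REALM') -> 'nimbus'
#   get_bare_principal(None) -> None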
| 33.714286 | 97 | 0.764831 |
dbff3b375851c03b4ae31fb20b30423a4b9c6ad5
| 1,162 |
py
|
Python
|
04/cross_validation.01.py
|
study-machine-learning/dongheon.shin
|
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
|
[
"MIT"
] | 2 |
2017-09-24T02:29:48.000Z
|
2017-10-05T11:15:22.000Z
|
04/cross_validation.01.py
|
study-machine-learning/dongheon.shin
|
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
|
[
"MIT"
] | null | null | null |
04/cross_validation.01.py
|
study-machine-learning/dongheon.shin
|
6103ef9c73b162603bc39a27e4ecca0f1ac35e57
|
[
"MIT"
] | null | null | null |
from sklearn import svm, metrics
import random
import re
lines = open("iris.csv", "r", encoding="utf-8").read().split("\n")
csv = list(map(to_columm, lines))
del csv[0]
random.shuffle(csv)
k = 5
csv_k = [[] for i in range(k)]
scores = []
for i in range(len(csv)):
csv_k[i % k].append(csv[i])
for test in csv_k:
train = []
for data in csv_k:
if test != data:
train += data
score = calculate_score(train, test)
scores.append(score)
print("score = ", scores)
print("avg = ", sum(scores) / len(scores))
| 16.84058 | 66 | 0.620482 |
e002150863388c3c73e7c985abca9ac6c4427a70
| 2,587 |
bzl
|
Python
|
third_party/org_specs2.bzl
|
wix/wix-oss-infra
|
b57ac2f0f66487cfdd08293a8fa389efe3d42c43
|
[
"MIT"
] | 3 |
2020-01-14T12:57:31.000Z
|
2021-06-06T20:47:57.000Z
|
third_party/org_specs2.bzl
|
wix/wix-oss-infra
|
b57ac2f0f66487cfdd08293a8fa389efe3d42c43
|
[
"MIT"
] | 20 |
2020-04-02T13:08:43.000Z
|
2020-11-05T11:27:40.000Z
|
third_party/org_specs2.bzl
|
wix/wix-oss-infra
|
b57ac2f0f66487cfdd08293a8fa389efe3d42c43
|
[
"MIT"
] | 1 |
2021-04-02T09:32:35.000Z
|
2021-04-02T09:32:35.000Z
|
load("@wix_oss_infra//:import_external.bzl", import_external = "safe_wix_scala_maven_import_external")
| 37.492754 | 102 | 0.68535 |
e0023b6272774adf06f1384bdb4cb510043c4a82
| 224 |
py
|
Python
|
task/w2/trenirovka/12-rivnist 2.py
|
beregok/pythontask
|
50394ff2b52ab4f3273ec9ddc4b504d1f7b3159e
|
[
"MIT"
] | 1 |
2019-09-29T14:19:54.000Z
|
2019-09-29T14:19:54.000Z
|
task/w2/trenirovka/12-rivnist 2.py
|
beregok/pythontask
|
50394ff2b52ab4f3273ec9ddc4b504d1f7b3159e
|
[
"MIT"
] | null | null | null |
task/w2/trenirovka/12-rivnist 2.py
|
beregok/pythontask
|
50394ff2b52ab4f3273ec9ddc4b504d1f7b3159e
|
[
"MIT"
] | null | null | null |
a = int(input())
b = int(input())
c = int(input())
d = int(input())
if a == 0 and b == 0:
print("INF")
else:
if (d - b * c / a) != 0 and (- b / a) == (- b // a):
print(- b // a)
else:
print("NO")
| 18.666667 | 56 | 0.397321 |
e0063a8b35dfc827fe158a159fe5d8b8ab703065
| 4,920 |
py
|
Python
|
get_data/speech_commands.py
|
patrick-kidger/generalised_shapelets
|
04930c89dc4673e2af402895fe67655f8375a808
|
[
"MIT"
] | 32 |
2020-05-31T17:41:58.000Z
|
2022-03-28T18:38:11.000Z
|
get_data/speech_commands.py
|
patrick-kidger/generalised_shapelets
|
04930c89dc4673e2af402895fe67655f8375a808
|
[
"MIT"
] | 1 |
2022-02-09T22:13:03.000Z
|
2022-02-09T23:55:28.000Z
|
get_data/speech_commands.py
|
patrick-kidger/generalised_shapelets
|
04930c89dc4673e2af402895fe67655f8375a808
|
[
"MIT"
] | 9 |
2020-07-17T16:50:24.000Z
|
2021-12-13T11:29:12.000Z
|
import os
import pathlib
import sklearn.model_selection
import tarfile
import torch
import torchaudio
import urllib.request
here = pathlib.Path(__file__).resolve().parent
if __name__ == '__main__':
main()
| 42.051282 | 120 | 0.566057 |
e008ab01b4020e37d916e20d303c66a51a23123e
| 5,652 |
py
|
Python
|
app/endpoints/products.py
|
duch94/spark_crud_test
|
94a514797700c2e929792f0424fb0e9e911489b7
|
[
"BSD-2-Clause"
] | null | null | null |
app/endpoints/products.py
|
duch94/spark_crud_test
|
94a514797700c2e929792f0424fb0e9e911489b7
|
[
"BSD-2-Clause"
] | null | null | null |
app/endpoints/products.py
|
duch94/spark_crud_test
|
94a514797700c2e929792f0424fb0e9e911489b7
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import datetime
from typing import List
from flask import Blueprint, jsonify, request, json
from app.models.products import Product, Category, products_categories
from app import db
products_blueprint = Blueprint('products', __name__)
def create_or_get_categories(p: dict) -> List[Category]:
"""
    Get existing Category objects, or create new ones for categories that do not exist yet.
:param p: payload of request
:return: list of categories
"""
    received_categories: List[Category] = [Category(name=cat) for cat in p['categories']]
    categories = []
    for cat in received_categories:
exists = db.session.query(db.exists().where(Category.name == cat.name)).all()[0][0]
if exists:
existing_category = Category.query.filter(Category.name == cat.name).all()[0]
categories.append(existing_category)
else:
categories.append(cat)
return categories
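# Example payload shape this helper expects (names are illustrative only):
#   p = {"categories": ["books", "fiction"]}
#   categories = create_or_get_categories(p)  # list of existing/new Category rows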
| 45.580645 | 120 | 0.608457 |
e008c8c892e467ea561589969c08eaa2c9b808db
| 1,701 |
py
|
Python
|
util/config/validators/test/test_validate_bitbucket_trigger.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 2,027 |
2019-11-12T18:05:48.000Z
|
2022-03-31T22:25:04.000Z
|
util/config/validators/test/test_validate_bitbucket_trigger.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 496 |
2019-11-12T18:13:37.000Z
|
2022-03-31T10:43:45.000Z
|
util/config/validators/test/test_validate_bitbucket_trigger.py
|
giuseppe/quay
|
a1b7e4b51974edfe86f66788621011eef2667e6a
|
[
"Apache-2.0"
] | 249 |
2019-11-12T18:02:27.000Z
|
2022-03-22T12:19:19.000Z
|
import pytest
from httmock import urlmatch, HTTMock
from util.config import URLSchemeAndHostname
from util.config.validator import ValidatorContext
from util.config.validators import ConfigValidationException
from util.config.validators.validate_bitbucket_trigger import BitbucketTriggerValidator
from test.fixtures import *
| 29.842105 | 87 | 0.671958 |
e008cc40a9e990beff8a7a594350250e113f3691
| 2,414 |
py
|
Python
|
Refraction.py
|
silkoch42/Geometric-Optics-from-QM
|
baf41b54c37835b527d5b98cb480d68bc2ff68c3
|
[
"MIT"
] | null | null | null |
Refraction.py
|
silkoch42/Geometric-Optics-from-QM
|
baf41b54c37835b527d5b98cb480d68bc2ff68c3
|
[
"MIT"
] | null | null | null |
Refraction.py
|
silkoch42/Geometric-Optics-from-QM
|
baf41b54c37835b527d5b98cb480d68bc2ff68c3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 15 16:51:16 2019
@author: Silvan
"""
import numpy as np
import scipy
import matplotlib.pyplot as plt
k=1000
n1=2.0
n2=1.0
alpha=np.pi/6.0
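# Snell's law relates the two ray angles across the interface: n1*sin(beta) = n2*sin(alpha)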
beta=np.arcsin(n2/n1*np.sin(alpha))
ya=1.0
xa=-ya*np.tan(alpha)
yb=-1.0
xb=-yb*np.tan(beta)
K2,x,r,i=K(3)
M=np.mean(K2[25:])
plt.plot(x,K2/M,label=r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$')
#plt.errorbar(x,K2/M,0.1*K2/M)
plt.xlabel(r'Integration range $R$')
plt.ylabel('Detection probabilty')
plt.legend(loc='best')
plt.text(2.4,0.2,r'$k=1000$')
#plt.text(1.1,0.5,r'$|\int_{-R}^{R}e^{i k s(x)}dx|^2$',fontsize=20)
plt.savefig('refraction_v3',dpi=200)
plt.show()
#N=20
#
#dx=np.linspace(0,10,N)
#
#P=np.ones(N)
#
#for i in range(N):
# print(i+1)
# P[i]=trans_amp(dx[i])
#
#
#plt.figure(1)
#plt.plot(dx,P/np.mean(P[20:]))
#plt.text(4.0,0.5,r'$|\int_{-\Delta x}^{\Delta x} e^{ik s(x)}dx$|',fontsize=20)
#plt.ylabel('Transition Amplitude')
#plt.xlabel(r'Integration Interval $ \Delta x$')
##plt.axis([0,10,0,1.1])
#plt.legend(loc='best')
##plt.savefig('refraction',dpi=200)
#plt.show()
#x=np.linspace(-5,5,100)
#
#plt.figure(2)
#plt.plot(x,s(x))
#plt.show()
#
#d=np.linspace(0,5,100)
#xa=-d/2
#xb=d/2
#plt.figure(3)
#plt.plot(d,kernel(xa,xb)**2)
#plt.show()
| 24.383838 | 95 | 0.583264 |
e0097d9cfd4f53f9d94ad08b373e659436909217
| 444 |
py
|
Python
|
readthedocs/docsitalia/management/commands/clear_elasticsearch.py
|
italia/readthedocs.org
|
440d3885380d20ec24081f76e26d20701749e179
|
[
"MIT"
] | 19 |
2018-03-28T12:28:35.000Z
|
2022-02-14T20:09:42.000Z
|
readthedocs/docsitalia/management/commands/clear_elasticsearch.py
|
berez23/docs.italia.it
|
440d3885380d20ec24081f76e26d20701749e179
|
[
"MIT"
] | 274 |
2017-10-10T07:59:04.000Z
|
2022-03-12T00:56:03.000Z
|
readthedocs/docsitalia/management/commands/clear_elasticsearch.py
|
italia/readthedocs.org
|
440d3885380d20ec24081f76e26d20701749e179
|
[
"MIT"
] | 13 |
2018-04-03T09:49:50.000Z
|
2021-04-18T22:04:15.000Z
|
"""Remove the readthedocs elasticsearch index."""
from __future__ import absolute_import
from django.conf import settings
from django.core.management.base import BaseCommand
from elasticsearch import Elasticsearch
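# The management command class itself is not part of this excerpt; a minimal
# sketch of what it could look like (the ES_HOSTS setting and the index name
# are assumptions, not taken from the original file):
class Command(BaseCommand):
    help = 'Clear the readthedocs Elasticsearch index.'
    def handle(self, *args, **options):
        # connect to the configured cluster and drop the index, ignoring a 404
        e = Elasticsearch(settings.ES_HOSTS)
        e.indices.delete(index='readthedocs', ignore=404)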
| 23.368421 | 51 | 0.725225 |
e00b9ee8e43ae71af00a3fe383bedc3df745f04d
| 7,574 |
py
|
Python
|
train.py
|
vnbot2/BigGAN-PyTorch
|
1725269d52e05fbd4d06dac64aa4906a8ae7a760
|
[
"MIT"
] | null | null | null |
train.py
|
vnbot2/BigGAN-PyTorch
|
1725269d52e05fbd4d06dac64aa4906a8ae7a760
|
[
"MIT"
] | null | null | null |
train.py
|
vnbot2/BigGAN-PyTorch
|
1725269d52e05fbd4d06dac64aa4906a8ae7a760
|
[
"MIT"
] | null | null | null |
""" BigGAN: The Authorized Unofficial PyTorch release
Code by A. Brock and A. Andonian
This code is an unofficial reimplementation of
"Large-Scale GAN Training for High Fidelity Natural Image Synthesis,"
by A. Brock, J. Donahue, and K. Simonyan (arXiv 1809.11096).
Let's go.
"""
import datetime
import time
import torch
import dataset
import BigGAN
import train_fns
import utils
from common import *
# IMG_SIZE = 64
# IMG_SIZE_2 = IMG_SIZE * 2
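# main() is called below but its definition is omitted from this excerpt; a
# minimal sketch of what it does in the BigGAN-PyTorch repository (parse the
# config with utils.prepare_parser() and hand it to run(), which is assumed
# to be defined earlier in the full file):
def main():
    # parse command line arguments into the config dictionary and train
    parser = utils.prepare_parser()
    config = vars(parser.parse_args())
    print(config)
    run(config)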
if __name__ == '__main__':
main()
| 40.287234 | 103 | 0.597571 |
e00c71d6078595059b1d0af82650622e80499174
| 1,693 |
py
|
Python
|
geocamUtil/tempfiles.py
|
geocam/geocamUtilWeb
|
b64fc063c64b4b0baa140db4c126f2ff980756ab
|
[
"NASA-1.3"
] | 4 |
2017-03-03T16:24:24.000Z
|
2018-06-24T05:50:40.000Z
|
geocamUtil/tempfiles.py
|
geocam/geocamUtilWeb
|
b64fc063c64b4b0baa140db4c126f2ff980756ab
|
[
"NASA-1.3"
] | 1 |
2021-09-29T17:17:30.000Z
|
2021-09-29T17:17:30.000Z
|
geocamUtil/tempfiles.py
|
geocam/geocamUtilWeb
|
b64fc063c64b4b0baa140db4c126f2ff980756ab
|
[
"NASA-1.3"
] | 1 |
2017-12-19T20:45:53.000Z
|
2017-12-19T20:45:53.000Z
|
# __BEGIN_LICENSE__
#Copyright (c) 2015, United States Government, as represented by the
#Administrator of the National Aeronautics and Space Administration.
#All rights reserved.
# __END_LICENSE__
import os
import time
import random
import shutil
from glob import glob
import traceback
import sys
from geocamUtil import FileUtil
from django.conf import settings
| 27.306452 | 93 | 0.617247 |
e00d7dd12724a0363ee40d8c349e7cccfb71d6f4
| 5,752 |
py
|
Python
|
Ex1:Tests/ex2.py
|
Lludion/Exercises-SE
|
4d5b2b4f2989a3e2c7891ba2b766394dbfb43973
|
[
"MIT"
] | null | null | null |
Ex1:Tests/ex2.py
|
Lludion/Exercises-SE
|
4d5b2b4f2989a3e2c7891ba2b766394dbfb43973
|
[
"MIT"
] | null | null | null |
Ex1:Tests/ex2.py
|
Lludion/Exercises-SE
|
4d5b2b4f2989a3e2c7891ba2b766394dbfb43973
|
[
"MIT"
] | null | null | null |
# This file contains (at least) five errors.
# Instructions:
# - test until reaching 100% coverage;
# - fix the bugs;
# - send the diff or the git repository by email.
import hypothesis
from hypothesis import given, settings
from hypothesis.strategies import integers, lists
| 31.26087 | 112 | 0.604312 |
e00dbb3c20046835e182d01718caf34d09944176
| 22,455 |
py
|
Python
|
python/snewpy/snowglobes.py
|
svalder/snewpy
|
5723189ae3dce3506f2fab056bbef24c9ab1a31f
|
[
"BSD-3-Clause"
] | null | null | null |
python/snewpy/snowglobes.py
|
svalder/snewpy
|
5723189ae3dce3506f2fab056bbef24c9ab1a31f
|
[
"BSD-3-Clause"
] | null | null | null |
python/snewpy/snowglobes.py
|
svalder/snewpy
|
5723189ae3dce3506f2fab056bbef24c9ab1a31f
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""The ``snewpy.snowglobes`` module contains functions for interacting with SNOwGLoBES.
`SNOwGLoBES <https://github.com/SNOwGLoBES/snowglobes>`_ can estimate detected
event rates from a given input supernova neutrino flux. It supports many
different neutrino detectors, detector materials and interaction channels.
There are three basic steps to using SNOwGLoBES from SNEWPY:
* **Generating input files for SNOwGLoBES:**
  There are two ways to do this: either generate a time series or a fluence file. Both take the supernova simulation model as input.
  The former evaluates the neutrino flux at each time step, while the latter computes the integrated neutrino flux (fluence) in each time bin.
The result is a compressed .tar file containing all individual input files.
* **Running SNOwGLoBES:**
This step convolves the fluence generated in the previous step with the cross-sections for the interaction channels happening in various detectors supported by SNOwGLoBES.
It takes into account the effective mass of the detector as well as a smearing matrix describing the energy-dependent detection efficiency.
The output gives the number of events detected as a function of energy for each interaction channel, integrated in a given time window (or time bin), or in a snapshot in time.
* **Collating SNOwGLoBES outputs:**
This step puts together all the interaction channels and time bins evaluated by SNOwGLoBES in a single file (for each detector and for each time bin).
The output tables allow to build the detected neutrino energy spectrum and neutrino time distribution, for each reaction channel or the sum of them.
"""
import io
import logging
import os
import re
import tarfile
from pathlib import Path
from tempfile import TemporaryDirectory
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from tqdm.auto import tqdm
import snewpy.models
from snewpy.flavor_transformation import *
from snewpy.neutrino import Flavor, MassHierarchy
from snewpy.snowglobes_interface import SNOwGLoBES
logger = logging.getLogger(__name__)
def generate_time_series(model_path, model_type, transformation_type, d, output_filename=None, ntbins=30, deltat=None):
"""Generate time series files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
ntbins : int
Number of time slices. Will be ignored if ``deltat`` is also given.
deltat : astropy.Quantity or None
Length of time slices.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
# Subsample the model time. Default to 30 time slices.
tmin = snmodel.get_time()[0]
tmax = snmodel.get_time()[-1]
if deltat is not None:
dt = deltat
ntbins = int((tmax-tmin)/dt)
else:
dt = (tmax - tmin) / (ntbins+1)
tedges = np.arange(tmin/u.s, tmax/u.s, dt/u.s)*u.s
times = 0.5*(tedges[1:] + tedges[:-1])
# Generate output.
if output_filename is not None:
tfname = output_filename + 'kpc.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(tmin, tmax, ntbins, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV # 1MeV
# Loop over sampled times.
for i, t in enumerate(times):
osc_spectra = snmodel.get_transformed_spectra(t, energy, flavor_transformation)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
model_file_root, _ = os.path.splitext(model_file)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(tmin/u.s, tmax/u.s, ntbins, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
def generate_fluence(model_path, model_type, transformation_type, d, output_filename=None, tstart=None, tend=None):
"""Generate fluence files in SNOwGLoBES format.
This version will subsample the times in a supernova model, produce energy
tables expected by SNOwGLoBES, and compress the output into a tarfile.
Parameters
----------
model_path : str
Input file containing neutrino flux information from supernova model.
model_type : str
Format of input file. Matches the name of the corresponding class in :py:mod:`snewpy.models`.
transformation_type : str
Name of flavor transformation. See snewpy.flavor_transformation documentation for possible values.
d : int or float
Distance to supernova in kpc.
output_filename : str or None
Name of output file. If ``None``, will be based on input file name.
tstart : astropy.Quantity or None
Start of time interval to integrate over, or list of start times of the time series bins.
tend : astropy.Quantity or None
End of time interval to integrate over, or list of end times of the time series bins.
Returns
-------
str
Path of compressed .tar file with neutrino flux data.
"""
model_class = getattr(snewpy.models.ccsn, model_type)
# Choose flavor transformation. Use dict to associate the transformation name with its class.
flavor_transformation_dict = {'NoTransformation': NoTransformation(), 'AdiabaticMSW_NMO': AdiabaticMSW(mh=MassHierarchy.NORMAL), 'AdiabaticMSW_IMO': AdiabaticMSW(mh=MassHierarchy.INVERTED), 'NonAdiabaticMSWH_NMO': NonAdiabaticMSWH(mh=MassHierarchy.NORMAL), 'NonAdiabaticMSWH_IMO': NonAdiabaticMSWH(mh=MassHierarchy.INVERTED), 'TwoFlavorDecoherence': TwoFlavorDecoherence(), 'ThreeFlavorDecoherence': ThreeFlavorDecoherence(), 'NeutrinoDecay_NMO': NeutrinoDecay(mh=MassHierarchy.NORMAL), 'NeutrinoDecay_IMO': NeutrinoDecay(mh=MassHierarchy.INVERTED)}
flavor_transformation = flavor_transformation_dict[transformation_type]
model_dir, model_file = os.path.split(os.path.abspath(model_path))
snmodel = model_class(model_path)
#set the timings up
#default if inputs are None: full time window of the model
if tstart is None:
tstart = snmodel.get_time()[0]
tend = snmodel.get_time()[-1]
try:
if len(tstart/u.s) > 0:
t0 = tstart[0]
t1 = tend[-1]
nbin = len(tstart/u.s)
except:
t0 = tstart
t1 = tend
nbin = 1
times = 0.5*(tstart + tend)
model_times = snmodel.get_time()
model_tstart = model_times*1.0
model_tend = model_times*1.0
model_tstart[0] = model_times[0]
for i in range(1, len(model_times), 1):
model_tstart[i] = 0.5*(model_times[i]+model_times[i-1])
model_tend[i-1] = model_tstart[i]
model_tend[len(model_times)-1] = model_times[-1]
if nbin > 1:
starting_index = np.zeros(len(times), dtype=np.int64)
ending_index = np.zeros(len(times), dtype=np.int64)
for i in range(len(tstart)):
starting_index[i] = next(j for j, t in enumerate(model_tend) if t > tstart[i])
ending_index[i] = next(j for j, t in enumerate(model_tend) if t >= tend[i])
else:
starting_index = [next(j for j, t in enumerate(model_tend) if t > tstart)]
ending_index = [next(j for j, t in enumerate(model_tend) if t >= tend)]
# Generate output.
if output_filename is not None:
tfname = output_filename+'.tar.bz2'
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
tfname = model_file_root + '.' + transformation_type + '.{:.3f},{:.3f},{:d}-{:.1f}'.format(t0, t1, nbin, d) + 'kpc.tar.bz2'
with tarfile.open(os.path.join(model_dir, tfname), 'w:bz2') as tf:
#creates file in tar archive that gives information on parameters
output = '\n'.join(map(str, transformation_type)).encode('ascii')
tf.addfile(tarfile.TarInfo(name='parameterinfo'), io.BytesIO(output))
MeV = 1.60218e-6 * u.erg
energy = np.linspace(0, 100, 501) * MeV
# Loop over sampled times.
for i in range(nbin):
if nbin > 1:
ta = tstart[i]
tb = tend[i]
t = times[i]
dt = tb-ta
else:
ta = tstart
tb = tend
t = times
dt = tb-ta
#first time bin of model in requested interval
osc_spectra = snmodel.get_transformed_spectra(model_times[starting_index[i]], energy, flavor_transformation)
if dt < model_tend[starting_index[i]]-ta:
dt = dt
else:
for flavor in Flavor:
osc_spectra[flavor] *= (model_tend[starting_index[i]]-ta)
#intermediate time bins of model in requested interval
for j in range(starting_index[i]+1, ending_index[i], 1):
temp_spectra = snmodel.get_transformed_spectra(model_times[j], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(model_tend[j]-model_tstart[j])
#last time bin of model in requested interval
temp_spectra = snmodel.get_transformed_spectra(
model_times[ending_index[i]], energy, flavor_transformation)
for flavor in Flavor:
osc_spectra[flavor] += temp_spectra[flavor]*(tb-model_tstart[ending_index[i]])
for flavor in Flavor:
osc_spectra[flavor] /= (tb-ta)
osc_fluence = {}
table = []
table.append('# TBinMid={:g}sec TBinWidth={:g}s EBinWidth=0.2MeV Fluence at Earth for this timebin in neutrinos per cm^2'.format(t, dt))
table.append('# E(GeV) NuE NuMu NuTau aNuE aNuMu aNuTau')
# Generate energy + number flux table.
for j, E in enumerate(energy):
for flavor in Flavor:
osc_fluence[flavor] = osc_spectra[flavor][j] * dt * 0.2 * MeV / (4.*np.pi*(d*1000*3.086e+18)**2)
s = '{:17.8E}'.format(E/(1e3 * MeV))
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_E_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
s = '{}{:17.8E}'.format(s, osc_fluence[Flavor.NU_X_BAR])
table.append(s)
logging.debug(s)
# Encode energy/flux table and output to file in tar archive.
output = '\n'.join(table).encode('ascii')
extension = ".dat"
if output_filename is not None:
if nbin > 1:
filename = output_filename+"_"+str(i)+extension
else:
filename = output_filename+extension
else:
model_file_root, _ = os.path.splitext(model_file) # strip extension (if present)
filename = model_file_root + '.tbin{:01d}.'.format(i+1) + transformation_type + \
'.{:.3f},{:.3f},{:01d}-{:.1f}kpc{}'.format(t0, t1, nbin, d, extension)
info = tarfile.TarInfo(name=filename)
info.size = len(output)
tf.addfile(info, io.BytesIO(output))
return os.path.join(model_dir, tfname)
def simulate(SNOwGLoBESdir, tarball_path, detector_input="all", verbose=False):
"""Takes as input the neutrino flux files and configures and runs the supernova script inside SNOwGLoBES, which outputs calculated event rates expected for a given (set of) detector(s). These event rates are given as a function of the neutrino energy and time, for each interaction channel.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
"""
sng = SNOwGLoBES(SNOwGLoBESdir)
if detector_input == 'all':
detector_input = list(sng.detectors)
detector_input.remove('d2O')
elif isinstance(detector_input,str):
detector_input = [detector_input]
result = {}
#Extracts data from tarfile and sets up lists of paths and fluxfilenames for later use
with TemporaryDirectory(prefix='snowglobes') as tempdir:
with tarfile.open(tarball_path) as tar:
tar.extractall(tempdir)
flux_files = list(Path(tempdir).glob('*.dat'))
if len(detector_input)>0:
detector_input = tqdm(detector_input, desc='Detectors', leave=False)
for det in detector_input:
res=sng.run(flux_files, det)
result[det]=dict(zip((f.stem for f in flux_files),res))
# save result to file for re-use in collate()
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Saving simulation results to {cache_file}')
np.save(cache_file, result)
return result
re_chan_label = re.compile(r'nu(e|mu|tau)(bar|)_([A-Z][a-z]*)(\d*)_?(.*)')
def collate(SNOwGLoBESdir, tarball_path, detector_input="all", skip_plots=False, verbose=False, remove_generated_files=True):
"""Collates SNOwGLoBES output files and generates plots or returns a data table.
Parameters
----------
SNOwGLoBESdir : str
Path to directory where SNOwGLoBES is installed.
tarball_path : str
Path of compressed .tar file produced e.g. by ``generate_time_series()`` or ``generate_fluence()``.
detector_input : str
Name of detector. If ``"all"``, will use all detectors supported by SNOwGLoBES.
skip_plots: bool
If False, it gives as output the plot of the energy distribution for each time bin and for each interaction channel.
verbose : bool
Whether to generate verbose output, e.g. for debugging.
remove_generated_files: bool
Remove the output files from SNOwGLoBES, collated files, and .png's made for this snewpy run.
Returns
-------
dict
Dictionary of data tables: One table per time bin; each table contains in the first column the energy bins, in the remaining columns the number of events for each interaction channel in the detector.
"""
#read the results from storage
cache_file = tarball_path[:tarball_path.rfind('.tar')] + '.npy'
logging.info(f'Reading tables from {cache_file}')
tables = np.load(cache_file, allow_pickle=True).tolist()
#This output is similar to what produced by:
#tables = simulate(SNOwGLoBESdir, tarball_path,detector_input)
#dict for old-style results, for backward compatibiity
results = {}
#save collated files:
with TemporaryDirectory(prefix='snowglobes') as tempdir:
tempdir = Path(tempdir)
for det in tables:
results[det] = {}
for flux,t in tables[det].items():
t = aggregate_channels(t,nc='nc_',e='_e')
for w in ['weighted','unweighted']:
for s in ['smeared','unsmeared']:
table = t[w][s]
filename_base = f'{flux}_{det}_events_{s}_{w}'
filename = tempdir/f'Collated_{filename_base}.dat'
#save results to text files
with open(filename,'w') as f:
f.write(table.to_string(float_format='%23.15g'))
#format the results for the output
header = 'Energy '+' '.join(list(table.columns))
data = table.to_numpy().T
index = table.index.to_numpy()
data = np.concatenate([[index],data])
results[filename.name] = {'header':header,'data':data}
#optionally plot the results
if skip_plots is False:
plt.figure(dpi=300)
do_plot(table,(flux,det,w,s))
filename = tempdir/f'{filename_base}_log_plot.png'
plt.savefig(filename.with_suffix('.png'), dpi=300, bbox_inches='tight')
#Make a tarfile with the condensed data files and plots
output_name = Path(tarball_path).stem
output_name = output_name[:output_name.rfind('.tar')]+'_SNOprocessed'
output_path = Path(tarball_path).parent/(output_name+'.tar.gz')
with tarfile.open(output_path, "w:gz") as tar:
for file in tempdir.iterdir():
tar.add(file,arcname=output_name+'/'+file.name)
logging.info(f'Created archive: {output_path}')
return results
| 46.298969 | 553 | 0.631485 |
e00e074bf789711cc01d53bcaa030d52c4e69f5b
| 4,621 |
py
|
Python
|
rlcycle/dqn_base/loss.py
|
cyoon1729/Rlcycle
|
5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569
|
[
"MIT"
] | 128 |
2020-06-29T01:40:36.000Z
|
2022-03-29T15:37:39.000Z
|
rlcycle/dqn_base/loss.py
|
cyoon1729/Rlcycle
|
5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569
|
[
"MIT"
] | 8 |
2020-06-29T03:51:50.000Z
|
2020-07-22T23:55:47.000Z
|
rlcycle/dqn_base/loss.py
|
cyoon1729/Rlcycle
|
5c65b9dd61a6fd5d6dfe92f0b3e04bf309828569
|
[
"MIT"
] | 24 |
2020-07-02T06:03:03.000Z
|
2022-03-22T11:59:53.000Z
|
from typing import List, Tuple
from omegaconf import DictConfig
import torch
import torch.nn as nn
import torch.nn.functional as F
from rlcycle.common.abstract.loss import Loss
| 34.485075 | 86 | 0.61069 |
e00f4579dad4a0f1f3310721291b602f532b6bf5
| 12,518 |
py
|
Python
|
scripts/gap_filling_viewer.py
|
raphischer/probgf
|
01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd
|
[
"MIT"
] | 3 |
2020-11-19T10:28:57.000Z
|
2021-04-15T17:16:24.000Z
|
scripts/gap_filling_viewer.py
|
raphischer/probgf
|
01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd
|
[
"MIT"
] | null | null | null |
scripts/gap_filling_viewer.py
|
raphischer/probgf
|
01bd2be85aa98afd79fc05c1eb3e260b2bcd2ebd
|
[
"MIT"
] | null | null | null |
"""viewer application which allows to interactively view spatio-temporal gap filling results"""
import os
import argparse
from datetime import datetime, timedelta
from tkinter import Canvas, Tk, Button, RAISED, DISABLED, SUNKEN, NORMAL
import numpy as np
from PIL import Image, ImageTk
import probgf.media as media
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-l', '--left', default='imgs/original/',
help='directory with images which are shown on the left')
parser.add_argument('-r', '--right', default='imgs/pred_outline_lin_spatial_clouds0_2/',
help='directory with images which are shown on the right')
parser.add_argument('-m', '--masks', default='imgs/mask/',
help='directory with mask images')
parser.add_argument('-R', '--report', default='report_lin_spatial_clouds0_2.csv',
help='report containing date and error information for the right hand images')
parser.add_argument('-y', '--year', type=int, default=2018,
help='year of data acquisition')
parser.add_argument('-W', '--width', type=int, default=1280,
help='window width')
parser.add_argument('-H', '--height', type=int, default=720,
help='window height')
args = parser.parse_args()
imgs_o = [Image.open(img) for img in sorted([os.path.join(args.left, img) for img in os.listdir(args.left)])]
imgs_p = [Image.open(img) for img in sorted([os.path.join(args.right, img) for img in os.listdir(args.right)])]
imgs_m = [Image.open(img) for img in sorted([os.path.join(args.masks, img) for img in os.listdir(args.masks)])]
report = np.genfromtxt(args.report, delimiter=',', dtype=float)[1:-1]
dates = [(datetime(args.year, 1, 1) + timedelta(int(report[day, 1]) - 1)).strftime('%b %d %Y') for day in range(report.shape[0])]
errors = ['{:4.1f}'.format(error) if error != 0.0 else 'n.a. ' for error in report[:, 5]]
logos = [media.logo1, media.logo2, media.logo3]
if len(imgs_o) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.left, args.report))
if len(imgs_p) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.right, args.report))
if len(imgs_m) != len(dates):
raise RuntimeError('Different number of images in {} than days in the report {}!'.format(args.masks, args.report))
root = Tk()
root.title('Gap Filling Viewer')
root.geometry("%dx%d+0+0" % (args.width, args.height))
MainWindow(root, args.width, args.height, imgs_p, imgs_o, imgs_m, dates, errors, logos)
root.focus_set()
root.mainloop()
| 51.941909 | 158 | 0.616552 |
e00f57f732929e05a58cd0ef2eae47d08e8561a9
| 4,946 |
py
|
Python
|
paypal/pro/tests.py
|
pdfcrowd/django-paypal
|
0ea56dc6c799204f0f8719481f94d0c79de6eff5
|
[
"Unlicense",
"MIT"
] | 1 |
2019-06-13T15:59:48.000Z
|
2019-06-13T15:59:48.000Z
|
pro/tests.py
|
sirmmo/django-paypal
|
0c8aeec1c319a08ce1bfdf828534d01b69b8fa27
|
[
"MIT",
"Unlicense"
] | null | null | null |
pro/tests.py
|
sirmmo/django-paypal
|
0c8aeec1c319a08ce1bfdf828534d01b69b8fa27
|
[
"MIT",
"Unlicense"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.core.handlers.wsgi import WSGIRequest
from django.forms import ValidationError
from django.http import QueryDict
from django.test import TestCase
from django.test.client import Client, RequestFactory
from paypal.pro.fields import CreditCardField
from paypal.pro.helpers import PayPalWPP, PayPalError
RF = RequestFactory()
REQUEST = RF.get("/pay/", REMOTE_ADDR="127.0.0.1:8000")
# """Dummy class for testing PayPalWPP."""
# responses = {
# # @@@ Need some reals data here.
# "DoDirectPayment": """ack=Success×tamp=2009-03-12T23%3A52%3A33Z&l_severitycode0=Error&l_shortmessage0=Security+error&l_longmessage0=Security+header+is+not+valid&version=54.0&build=854529&l_errorcode0=&correlationid=""",
# }
#
# def _request(self, data):
# return self.responses["DoDirectPayment"]
### DoExpressCheckoutPayment
# PayPal Request:
# {'amt': '10.00',
# 'cancelurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'custom': u'website_id=480&cname=1',
# 'inv': u'website-480-cname',
# 'method': 'DoExpressCheckoutPayment',
# 'next': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'payerid': u'BN5JZ2V7MLEV4',
# 'paymentaction': 'Sale',
# 'returnurl': u'http://xxx.xxx.xxx.xxx/deploy/480/upgrade/?upgrade=cname',
# 'token': u'EC-6HW17184NE0084127'}
#
# PayPal Response:
# {'ack': 'Success',
# 'amt': '10.00',
# 'build': '848077',
# 'correlationid': '375f4773c3d34',
# 'currencycode': 'USD',
# 'feeamt': '0.59',
# 'ordertime': '2009-03-04T20:56:08Z',
# 'paymentstatus': 'Completed',
# 'paymenttype': 'instant',
# 'pendingreason': 'None',
# 'reasoncode': 'None',
# 'taxamt': '0.00',
# 'timestamp': '2009-03-04T20:56:09Z',
# 'token': 'EC-6HW17184NE0084127',
# 'transactionid': '3TG42202A7335864V',
# 'transactiontype': 'expresscheckout',
# 'version': '54.0'}
| 32.973333 | 234 | 0.593207 |
e00f75413d6a65ba71109974edd248bc1533ce8f
| 1,010 |
py
|
Python
|
Hackerrank_Bot_Saves_Princess.py
|
madhurgupta96/Algorithmic-Journey
|
75868af1050c99fc25e295812ba1a47468c6737f
|
[
"Apache-2.0"
] | null | null | null |
Hackerrank_Bot_Saves_Princess.py
|
madhurgupta96/Algorithmic-Journey
|
75868af1050c99fc25e295812ba1a47468c6737f
|
[
"Apache-2.0"
] | null | null | null |
Hackerrank_Bot_Saves_Princess.py
|
madhurgupta96/Algorithmic-Journey
|
75868af1050c99fc25e295812ba1a47468c6737f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 19:46:40 2020
@author: Intel
"""
m = int(input())
grid = []
for i in range(0, m):
grid.append(input().strip())
displayPathtoPrincess(m,grid)
| 22.954545 | 40 | 0.372277 |
e01063b5b93496a8c88b374770c28bc942feb23d
| 65,397 |
py
|
Python
|
gelviz/basic.py
|
HiDiHlabs/gelviz
|
515f0462738b44609679c2a26c7d8ac3ed3b4b2b
|
[
"BSD-3-Clause"
] | null | null | null |
gelviz/basic.py
|
HiDiHlabs/gelviz
|
515f0462738b44609679c2a26c7d8ac3ed3b4b2b
|
[
"BSD-3-Clause"
] | null | null | null |
gelviz/basic.py
|
HiDiHlabs/gelviz
|
515f0462738b44609679c2a26c7d8ac3ed3b4b2b
|
[
"BSD-3-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
import pybedtools
import pandas as pnd
import numpy as np
import tabix
import matplotlib.ticker as ticker
from matplotlib.patches import Rectangle
from matplotlib.patches import Arrow
from matplotlib.path import Path
from matplotlib.patches import PathPatch
import matplotlib.cm as cm
import matplotlib
import math
def plotGenes(genes_bed,
exons_bed,
introns_bed,
region_bed,
blacklist=None,
gene_map=None,
plot_gene_ids=True,
y_max=None,
distance_ratio=0.1,
ax=None,
plot_legend=False,
legend_loc="lower right",
color_plus="#80b1d3",
color_minus="#fb8072"):
"""Function for plotting gene structures, i.e. introns exons of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TX start,
and TX end of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param exons_bed: :class:`pybedtools.BedTool` object containing exons of
genes.
:type exons_bed: :class:`pybedtools.BedTool`
:param introns_bed: :class:`pybedtools.BedTool` object containing introns
:type introns_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the one
region, for which the gene plot is created.
:type region_bed: :class:`pybedtools.BedTool`
:param blacklist: List of gene names, for genes that should not be shown on
the plot, default is None
:type blacklist: list, optional
:param plot_gene_ids: If True, all gene ids will be included in the plot,
False otherwise, default is True
:type plot_gene_ids: bool, optional
:param y_max: Max y value in the gene plot. If not set, then y_max is the
max number of stacked genes, default is None.
    :type y_max: int, optional
:param distance_ratio: Minimal distance between two genes, as ratio of ax
width, such that two genes are plotted side by side. If this ratio is
underwent, the genes will be stacked, default is 0.1.
:type distance_ratio: float, optional
:param ax: Axes instance on which the genes are plotted, default is None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True, a legend describing plus or minus stranded
genes is plotted, False otherwise. Default is False.
:type plot_legend: bool, optional
:param legend_loc: Location of the legend. Either of "lower left",
"lower right", "upper left", "upper right", default is "lower right".
:type legend_loc: str, optional
:param color_plus: Color code for plus stranded genes, default is "#80b1d3".
:type color_plus: str, optional.
:param color_minus: Color code for minus stranded genes, default is
"#fb8072".
:type color_minus: str, optional.
:return: Tuple of max_y_pos+1.5, patch_list, patch_description_list, where
1. max_y_pos+1.5 is the max_y_position + 1.5. max_y_pos defines the \
number of stacked genes.
2. patch_list is the list of patches drawn on the ax.
3. patch_description_list is the list of descriptions for the patches \
drawn on the ax.
    :rtype: tuple
"""
ax = ax if ax is not None else plt.gca()
genes_in_region = genes_bed
exons_in_region = exons_bed
introns_in_region = introns_bed
region_border_up = int(region_bed[0][1])
region_border_down = int(region_bed[0][2])
region_size = region_border_down-region_border_up
color_forward = color_plus
color_reverse = color_minus
max_y_pos = None
if(not len(genes_in_region) == 0):
# Determine y positions of genes for plotting
max_y_pos, y_pos_dict = determineYPosGene(genes_in_region,
(region_border_down-
region_border_up),
distance_ratio)
if(not y_max is None):
max_y_pos = y_max
# Plot Exons
for i in exons_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
rect = Rectangle((start, y-.2),
end-start,
.4,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
patch_list = []
patch_description_list = []
met_forward = False
met_reverse = False
# Plot Introns
for i in introns_in_region:
start = int(i[1])
end = int(i[2])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
y = max_y_pos-y_pos_dict[gene_name]+0.5
patch = Rectangle((start, y-.03),
end-start,
.06,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(patch)
if(strand == "+" and not(met_forward)):
patch_list += [patch]
patch_description_list += ["forward strand"]
met_forward = True
elif(strand == "-" and not(met_reverse)):
patch_list += [patch]
patch_description_list += ["reverse strand"]
met_reverse = True
# Plot Gene Names
if(plot_gene_ids):
for i in genes_in_region:
start = int(i[1])
gene_name = str(i[3])
if(not blacklist is None and gene_map[gene_name] in blacklist):
continue
# Define color for gene plotting
strand = str(i[5])
color = color_forward
if(strand == "-"):
color = color_reverse
border_distance_down = region_border_down-start
if(start < region_border_up):
start = region_border_up
border_distance_down = region_border_down-start
if(not(float(border_distance_down)/float(region_size)
< distance_ratio)):
gene_name = str(i[3])
gene_name_label = gene_name
if(not gene_map is None):
gene_name_label = gene_map[gene_name]
y = max_y_pos-y_pos_dict[gene_name]+.8
plt.text(start,
y,
gene_name_label,
size=5,
color = color)
gene_name = str(i[3])
gene_name_label = gene_name
if(not gene_map is None):
gene_name_label = gene_map[gene_name]
y = max_y_pos-y_pos_dict[gene_name]+.8
plt.text(start, y, gene_name_label, size=5, color = color)
plt.xlim([region_border_up, region_border_down])
plt.ylim([0, max_y_pos+1.5])
plt.yticks([], [])
if(plot_legend):
plt.legend(patch_list,
patch_description_list,
loc=legend_loc,
fontsize=5)
return max_y_pos+1.5, patch_list, patch_description_list
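# Example (sketch): plotting gene models for a single region; the BED file
# paths below are hypothetical and must follow the column layout described in
# the docstring above.
#
#   genes = pybedtools.BedTool("genes.bed")
#   exons = pybedtools.BedTool("exons.bed")
#   introns = pybedtools.BedTool("introns.bed")
#   region = pybedtools.BedTool("chr1\t1000000\t2000000", from_string=True)
#   fig, ax = plt.subplots(figsize=(8, 2))
#   plotGenes(genes.intersect(region, u=True), exons.intersect(region, u=True),
#             introns.intersect(region, u=True), region, ax=ax, plot_legend=True)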
def determineYPosGene(genes_bed,
region_size,
distance_ratio):
'''Function that determines the max y position for gene plotting via
function plotGenes.
:param genes_bed: :class:`pybedtools.BedTool` object containing genes to be
plotted.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_size: Size of region to be plotted in base pairs.
:type region_size: int
:param distance_ratio: Minimal distance between two genes, as ratio of ax
width, such that two genes are plotted side by side. If this ratio is
underwent, the genes will be stacked.
:type distance_ratio: float
:return: Tuple of
1. max_y_pos: Defines the number of stacked genes.
2. y_pos_dict: Dictionary with keys = gene ids and values = y position \
of gene.
:rtype: tuple
'''
sort_indices = [int(idx) for idx in np.argsort([i[1] for i in genes_bed])]
genes_sorted_bed = [genes_bed[i] for i in sort_indices]
y_pos_dict = {}
y_level_dict = {}
max_y_pos = 0
for interval in genes_sorted_bed:
gene_name = interval[3]
gene_start = int(interval[1])
gene_end = int(interval[2])
for i in range(max_y_pos+1):
if(i == 0 and not max_y_pos in y_level_dict):
y_pos_dict[gene_name] = i
y_level_dict[i] = [[gene_start, gene_end]]
break
elif(gene_start > y_level_dict[i][-1][1] and
float(gene_start-y_level_dict[i][-1][0])/float(region_size) >
distance_ratio):
y_pos_dict[gene_name] = i
y_level_dict[i] += [[gene_start, gene_end]]
break
elif(i == max_y_pos):
max_y_pos += 1
y_pos_dict[gene_name] = max_y_pos
y_level_dict[max_y_pos] = [[gene_start, gene_end]]
break
else:
continue
return max_y_pos, y_pos_dict
def createGeneNameMap(gene_name_mapping_filename):
'''Function that creates a mapping between gene ids
    :param gene_name_mapping_filename: Path to a tab separated file, for which
        the first column is an ENSEMBL gene id, and the second column is the
        HUGO gene name.
    :type gene_name_mapping_filename: str
:return: Dictionary containing the gene id mapping.
:rtype: dictionary
'''
gene_name_mapping_file = open(gene_name_mapping_filename, "r")
gene_map = {}
for line in gene_name_mapping_file:
split_line = line.rstrip().split("\t")
ensembl_gene_id = split_line[0].split(".")[0]
hugo_gene_symbol = split_line[1].split(".")[0]
gene_map[ensembl_gene_id] = hugo_gene_symbol
gene_name_mapping_file.close()
return gene_map
def plotGeneExpression(genes_bed,
region_bed,
expression_df_g1,
expression_df_g2,
gene_names_map,
blacklist=None,
ax=None,
plot_legend=False,
color_g1="#fb8072",
color_g2="#80b1d3",
g1_id="tumor",
g2_id="normal",
plot_gene_names=True):
'''Function for plotting paired gene expression (e.g. tumor and normal) on a
gene region scale retaining the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing TXstart,
and TXend of genes.
:type genes_bed: :class:`pybedtools.BedTool`
:param region_bed: :class:`pybedtools.BedTool` object containing the region
to be plotted
:type region_bed: :class:`pybedtools.BedTool`
:param expression_df_g1: :class:`pandas.Dataframe` containing the expression
values of g1 samples (columns: sample ids; index: gene ids)
:type expression_df_g1: :class:`pandas.DataFrame`
:param expression_df_g2: :class:`pandas.Dataframe` containing the expression
values of g2 samples (columns: sample ids; index: gene ids)
:type expression_df_g2: :class:`pandas.DataFrame`
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param blacklist: Set containing gene ids not to be plotted, default to
None.
:type blacklist: set, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True legend is plotted, False otherwise, defaults to
False.
:type plot_legend: bool
:param color_g1: Color used for plotting g1 samples expression, defaults to
"#fb8072".
:type color_g1: str, optional
:param color_g2: Color used for plotting g2 samples expression, defaults to
"#80b1d3".
:type color_g2: str, optional
:param g1_id: ID of g1 used for legend plotting, defaults to "tumor".
:type g1_id: str, optional
:param g2_id: ID of g2 used for legend plotting, defaults to "normal".
:type g2_id: str, optional
:param plot_gene_names: If True, the HUGO GENE SYMBOLs will be shown, else
the GENE SYMBOLs are hidden.
:type plot_gene_names: bool.
:return: Axis on which plot was placed.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
ax = ax if ax is not None else plt.gca()
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
gene_names += [gene_names_map[gene_name_ens]]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
for i in range(len(gene_regions)):
if(not blacklist is None and gene_names[i] in blacklist):
continue
left_border = gene_regions[i][0]
right_border = None
if(i < len(gene_names)-1):
right_border = gene_regions[i+1][0]
else:
right_border = region_right_border
current_extension = right_border-left_border
if(current_extension == 0.):
continue
if(extension is None):
extension = float(current_extension)
elif(current_extension < extension):
extension = float(current_extension)
boxprops = {"color": "k", "linewidth": .3}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = None
patch_description_list = None
tick_positions = []
gene_names_clean = []
counter=0
patch_saved = False
for gene_name in gene_names:
left_border = gene_regions[counter][0]
right_border = region_right_border
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
if(counter < len(gene_names)-1):
right_border = gene_regions[counter+1][0]
bplot_g1_pos = left_border + extension/4.
bplot_g2_pos = left_border + 3*(extension/4.)
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values_g1 = expression_df_g1.loc[gene_name, :]
if(type(exp_values_g1).__name__ == "Series"):
exp_values_g1 = list(exp_values_g1)
else:
exp_values_g1 = list(exp_values_g1.iloc[0, :])
exp_values_g2 = expression_df_g2.loc[gene_name, :]
if(type(exp_values_g2).__name__ == "Series"):
exp_values_g2 = list(exp_values_g2)
else:
exp_values_g2 = list(exp_values_g2.iloc[0, :])
bplot_g1 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g1])],
positions=[bplot_g1_pos],
widths=extension/2.,
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g2 = ax.boxplot([np.log2([i if
i >= 1. else
1. for
i in exp_values_g2])],
positions=[bplot_g2_pos],
widths=extension/2.,
patch_artist = True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
bplot_g1["boxes"][0].set_facecolor(color_g1)
bplot_g2["boxes"][0].set_facecolor(color_g2)
if(not patch_saved):
patch_saved=True
patch_list = [bplot_g1["boxes"][0], bplot_g2["boxes"][0]]
patch_description_list = [g1_id, g2_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(
ticker.FixedFormatter(([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(6)
for ytick in ax.get_yticklabels():
ytick.set_size(6)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
def plotGeneExpressionEqualDist(genes_bed,
gene_mid_points,
region,
expression_df,
groups,
gene_names_map=None,
blacklist=None,
ax=None,
plot_legend=False,
colors=None,
ids=None,
plot_gene_names=True,
position_gene_names="bottom",
log_transformed=True,
plot_points=False,
alpha=.5):
'''Function for plotting grouped gene expression (e.g. tumor and normal) on
a gene region scale equalizing the position of genes.
:param genes_bed: :class:`pybedtools.BedTool` object containing gene
regions.
:type genes_bed: :class:`pybedtools.BedTool`
:param gene_mid_points: list of integer values containing center positions
of genes.
:type gene_mid_points: list
:param region: List containing the region to be plotted
([<chrom>, <start>, <end>]).
:type region: list
:param groups: List of lists containing the IDs of the different groups.
:type groups: list
:param gene_names_map: Dictionary with keys: ENSEMBL GENE IDs, and values:
HUGO GENE SYMBOLs.
:type gene_names_map: dict.
:param expression_df: class:`pandas.DataFrame` object containing the
expression values of all samples (columns: sample ids; index: gene ids).
:type expression_df: class:`pandas.DataFrame`
:param blacklist: Set containing gene ids not to be plotted, defaults to
None,
:type blacklist: set, optional
:param ax: (default: None) Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param plot_legend: If True plot legend, False otherwise, defaults to False.
:type plot_legend: bool, optional
:param colors: List of colors used for plotting samples expression. The
number of colors must be the same as the number of groups, defaults to
None.
:type colors: str, optional
:param ids: IDs used for legend plotting, defaults to None. Number of ids
must be the same as the number of groups.
:type ids: list, optional.
:param plot_gene_names: True if gene names shall be plotted,
False otherwise, defaults to True.
:type plot_gene_names: bool, optional
:param position_gene_names: Either of "top", or "bottom", defaults to
"bottom".
:type position_gene_names: str, optional
:param log_transformed: If True use log transformed values for plotting,
non-transformed values otherwise.
:type log_transformed: bool, optional
:param plot_points: If True, a point per expression value is plotted in
addition to the boxplot, no points are plotted otherwise, defaults to
False.
:type plot_points: bool, optional
:param alpha: Alpha value for the background color of the boxplots boxes,
defaults to 0.5.
:type alpha: float, optional
:return: Plots axis.
:rtype: :class:`matplotlib.axes._subplots.AxesSubplot`
'''
standard_colors = ["#66c2a5",
"#fc8d62",
"#8da0cb",
"#ec87c2",
"#a6d854",
"#ffd92f",
"#e5c494",
"#bbbbbb"]
ax = ax if ax is not None else plt.gca()
region_bed = pybedtools.BedTool("\t".join([str(i) for i in region]),
from_string=True)
# Get gene names and regions
genes_in_region_bed = genes_bed.intersect(region_bed,
wa=True,
u=True).sort()
gene_names = []
gene_regions = []
for e in genes_in_region_bed:
gene_name_ens = str(e[3])
if(not gene_names_map is None):
gene_names += [gene_names_map[gene_name_ens]]
else:
gene_names += [gene_name_ens]
gene_regions += [[int(e[1]), int(e[2])]]
region_right_border = int(region_bed[0][2])
region_left_border = int(region_bed[0][1])
# Determine minimal extension of barplot
extension=None
if(len(gene_mid_points) <= 1):
extension=region[2]-region[1]
else:
extension=gene_mid_points[1]-gene_mid_points[0]
# Subtract a small percentage of region size from extension
extension=extension-(region[2]-region[1])*.01
boxprops = {"color": "k", "linewidth": .3, "alpha":alpha}
flierprops = {"color": "k"}
medianprops = {"color": "k", "linewidth": .3}
whiskerprops = {"color": "k", "linewidth": .3}
capprops={"color": "k", "linewidth": .3}
patch_list = []
patch_description_list = []
tick_positions = []
gene_names_clean = []
counter=0
for gene_name in gene_names:
left_border = gene_mid_points[counter]-extension/2
right_border = gene_mid_points[counter]+extension/2
if(not blacklist is None and gene_name in blacklist):
counter += 1
continue
n_groups = len(groups)
for g in range(n_groups):
bplot_pos = left_border + (2*g+1)*extension/float((n_groups*2.))
tick_positions += [left_border + extension/2.]
gene_names_clean += [gene_name]
exp_values = expression_df.loc[gene_name, groups[g]]
if(type(exp_values).__name__ == "Series"):
exp_values = list(exp_values)
else:
exp_values = list(exp_values.iloc[0, :])
expression_values = exp_values
if(log_transformed):
expression_values = np.log2([i
if i >= 1.
else 1.
for i in exp_values])
bplot = ax.boxplot(expression_values,
positions=[bplot_pos],
widths=extension/float(n_groups),
patch_artist=True,
boxprops=boxprops,
flierprops=flierprops,
medianprops=medianprops,
whiskerprops=whiskerprops,
capprops=capprops,
showfliers=False)
color = None
if(not colors is None):
color = colors[g]
else:
color = standard_colors[g]
bplot["boxes"][0].set_facecolor(color)
if(plot_points):
x_positions = [ (bplot_pos+
(i-.5)*
((2*extension)/(float(n_groups)*3))) for i in
list(np.random.rand(len(expression_values))) ]
plt.plot(x_positions, expression_values, "k.", markersize=3)
g_id = None
if(not ids is None):
g_id = ids[g]
else:
g_id = "group "+str(g)
if(not g_id in patch_description_list):
patch_list += [bplot["boxes"][0]]
patch_description_list += [g_id]
counter += 1
ax.set_xlim(region_left_border, region_right_border)
if(position_gene_names == "top"):
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_major_locator(ticker.FixedLocator((tick_positions)))
ax.xaxis.set_major_formatter(ticker.FixedFormatter((gene_names_clean)))
if(not plot_gene_names):
ax.xaxis.set_major_formatter(ticker.FixedFormatter(
([ " " for i in
gene_names_clean])))
for tick in ax.get_xticklabels():
tick.set_rotation(45)
tick.set_size(5)
for ytick in ax.get_yticklabels():
ytick.set_size(5)
if(plot_legend):
ax.legend(patch_list,
patch_description_list,
fontsize=5,
loc='lower left')
return ax
def plotGenomicSegments(segments_list,
chrom,
start,
end,
ax = None):
    '''Function for plotting genomic segments in different colors
    :param segments_list: Iterable of BED-like segments containing
        (chrom, start, end, name, score, strand, start, end, color). The color
        field is used to determine the color for plotting (R,G,B).
    :type segments_list: iterable
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: str
:param end: End position of the region to be plotted.
:type end: str
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Dictionary with keys = names of segments, and values patch
:rtype: dict
'''
ax = ax if ax is not None else plt.gca()
patches_dict = {}
for segment in segments_list:
segment_start = int(segment[1])
segment_end = int(segment[2])
color = tuple([ float(i)/256. for i in
str(segment[-1]).split(",") ]+[1])
segment_type = str(segment[3])
if(segment_type == "R"):
color = (1,1,1,1)
rect = Rectangle((segment_start, 0),
segment_end-segment_start,
1,
color=color)
ax.add_patch(rect)
patches_dict[segment_type] = rect
plt.xlim(int(start), int(end))
plt.ylim(0, 1)
plt.yticks([], [])
return patches_dict
def plotCNVs(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
color_gain="g",
color_loss="r",
color_neutral="k",
ax=None):
'''Function for plotting CNV segments
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param color_gain: Plot color of copy number gains, defaults to "g".
:type color_gain: str, optional
:param color_loss: Plot color of copy number losses, defaults to "r".
:type color_loss: str, optional
:param color_neutral: Plot color of copy number neutral regions, defaults to
"k".
:type color_neutral: str, optional
:param ax: Axis used for plotting.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
# Smooth tcn, if ploidy_dev is smaller than cnv_threshold
if(abs(ploidy_dev) < cnv_threshold):
tcn = ploidy
color = color_neutral
if(ploidy_dev >= cnv_threshold):
color=color_gain
elif(ploidy_dev <= -1.*cnv_threshold):
color = color_loss
if(abs(ploidy_dev) > cnv_threshold):
rect = Rectangle((current_start, tcn-.2),
current_end-current_start,
.4,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
else:
rect = Rectangle((current_start, tcn-.1),
current_end-current_start,
.2,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
# Plot thresholds
color_threshold=(189./255., 189./255., 189./255., 0.5)
if(ploidy == 2):
plt.plot([int(start), int(end)],
[1, 1],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[2, 2],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[3, 3],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[4, 4],
color=color_threshold,
linestyle="--",
linewidth=.5)
elif(ploidy == 4):
plt.plot([int(start), int(end)],
[1, 1],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[2, 2],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[3, 3],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[4, 4],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[5, 5],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.plot([int(start), int(end)],
[6, 6],
color=color_threshold,
linestyle="--",
linewidth=.5)
plt.xlim([int(start), int(end)])
if(ploidy == 2):
plt.ylim([0, 4.5])
plt.yticks([0, 1, 2, 3, 4], ["0", "1", "2", "3", "4"], size=6)
elif(ploidy == 4):
plt.ylim([0, 6.5])
plt.yticks([0, 2, 4, 6], ["0", "2", "4", "6"], size=6)
plt.xticks(rotation=45)
def plotCNVsHeat(cnvs_bed,
chromosome,
start,
end,
ploidy=2,
cnv_threshold=0.7,
cmap="bwr",
max_dev=None,
ax=None):
'''Function for plotting CNV segments as heatmap
:param cnvs_bed: :class:`pybedtools.BedTool` object containing CNVs with
following entries:
1. Chromosome,
2. Start Position,
3. End Position,
4. Deviation from ploidy,
5. True Copy Number)
:type cnvs_bed: :class:`pybedtools.BedTool`
:param chromosome: Chromosome for which to plot CNVs.
:type chromosome: str
:param start: Start position on chromosome.
:type start: int
:param end: End position on chromosome.
:type end: int
:param ploidy: Assumed ploidy of tumor, defaults to 2.
:type ploidy: int, optional
:param cnv_threshold: Minimal deviation from ploidy to be considered as a
CNV, defaults to 0.7.
:type cnv_threshold: float, optional
:param cmap: Colormap used for plotting CNVs, defaults to "bwr".
:type cmap: str, optional
:param max_dev: Maximal deviation from ploidy to plot, defaults to None.
:type max_dev: float, optional
:param ax: Axis used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
# Use given axis for plotting
ax = ax if ax is not None else plt.gca()
colors = plt.cm.get_cmap(cmap)
if(max_dev is None):
max_dev = max([abs(float(i[3])) for i in cnvs_bed])
for interval in cnvs_bed:
current_start = int(interval[1])
current_end = int(interval[2])
ploidy_dev = float(interval[3])
tcn = float(interval[4])
if(tcn < -1.*max_dev):
tcn = -1.*max_dev
elif(tcn > max_dev):
tcn = max_dev
color = colors((ploidy_dev+max_dev)/(2*max_dev))
if(abs(ploidy_dev) < cnv_threshold):
color=colors(.5)
rect = Rectangle((current_start, .5),
current_end-current_start,
1,
color=color,
edgecolor='none',
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([int(start), int(end)])
plt.ylim([.5, 1.5])
plt.xticks([], [])
plt.yticks([], [])
def readACESeqAsBed(input_filename):
'''Function that reads CNVs from ACESeq ("*most_important*") files and
converts them to pybedtools.BedTool object
:param input_filename: Full path to ACESeq "most_important" file
:type input_filename: str
:return: :class:`pybedtools.BedTool` object containing CNVs from ACESeq
:rtype: :class:`pybedtools.BedTool`
'''
input_file = open(input_filename, "r")
cnv_bed_list = []
ploidy = None
for line in input_file:
if(line[:7] == "#ploidy"):
ploidy = float(line.rstrip().split(":")[1])
print(ploidy)
if(line[0] == "#" or line[:5] == "chrom"):
continue
split_line = line.rstrip().split("\t")
ploidy_dev = float(split_line[5])-ploidy
chrom = split_line[0]
if(chrom == "23"):
chrom="X"
elif(chrom == "24"):
chrom = "Y"
cnv_bed_list += [ [chrom,
split_line[1],
split_line[2],
str(ploidy_dev),
split_line[5],
"+"]
]
input_file.close()
return pybedtools.BedTool("\n".join(["\t".join(e) for e in
cnv_bed_list]),
from_string=True)
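# Example (sketch): reading an ACEseq "*most_important*" file and plotting its
# CNV segments for one chromosome; the file name and coordinates below are
# hypothetical.
#
#   cnvs = readACESeqAsBed("sample_most_important_info.txt")
#   cnvs_chr7 = cnvs.filter(lambda iv: iv.chrom == "7").saveas()
#   fig, ax = plt.subplots(figsize=(8, 1.5))
#   plotCNVs(cnvs_chr7, chromosome="7", start=0, end=159138663, ploidy=2, ax=ax)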
def plotChIPSignals(chip_signals,
r_chrom,
r_start,
r_end,
ax=None,
color="b",
offset=None,
merge=None):
    '''Function that plots bedGraph-like iterators.
    :param chip_signals: Iterator for which each element is a list-like
object containing:
1. Chromosome
2. Start postion
3. End position
4. Value to be plotted as bar
:type chip_signals: iterator
:param r_chrom: Chromosome of region to be plotted.
:type r_chrom: str
:param r_start: Start position of region to be plotted.
:type r_start: int
:param r_end: End position of region to be plotted.
:type r_end: int
:param ax: Axis of plot
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:param color: color of bars, defaults to "b".
:type color: str, optional
:param offset: Length of intervals, defaults to None.
:type offset: int, optional
    :param merge: Number of elements to be merged. If this value is not None,
        then `merge` adjacent elements will be averaged and plotted, defaults to None.
:type merge: int, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_signal = 0
left = []
height = []
for signal in chip_signals:
start = int(signal[1])
end = int(signal[2])
value = float(signal[3])
if(value > max_signal):
max_signal = value
if(not offset is None):
end = start + offset
left += [start]
height += [value]
left_merged = []
height_merged = []
if(not merge is None):
heights = []
lefts = []
for i in range(len(left)):
if(i % merge == 0 and not (i == 0)):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
heights += [height[i]]
lefts += [left[i]]
if(not i % merge == 0):
left_merged += [lefts[0]]
lefts = []
height_merged += [np.mean(heights)]
heights = []
offset = merge*offset
left = left_merged
height = height_merged
plt.bar(left, height, offset, color = color, edgecolor = color)
plt.xlim(r_start, r_end)
def plotMethylationProfileHeat(methylation_bed,
chrom,
start,
end,
bin_size=1000,
ax = None):
'''Function for plotting methylation values as heatmap
:param methylation_bed: Methylation calls. Following fields must be
included: Chrom, Start, End, Methylated Cs, Unmethylated Cs.
:type methylation_bed: :class:`pybedtools.BedTool`
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param bin_size: size of bin to average methylation values, defaults to
1000.
:type bin_size: int, optional
:param ax: Axis to be used for plotting, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
binned_meth_calls = [ [0, 0] for i in range(int(((end-start)/bin_size)+1)) ]
counter = 0
for element in methylation_bed:
# Determine bin
position = int(element[1])
if(position < start or position > end):
continue
n_meth = int(element[3])
n_unmeth = int(element[4])
current_bin = int((position-start)/bin_size)
counter += 1
binned_meth_calls[current_bin][0] += n_meth
binned_meth_calls[current_bin][1] += n_unmeth
binned_average_meth = [ float(i[0])/(float(i[0])+float(i[1]))
if (float(i[0])+float(i[1])) > 0
else "NA"
for i in binned_meth_calls ]
binned_average_meth_no_missing = []
n = len(binned_average_meth)
for i in range(n):
if(not binned_average_meth[i] == "NA"):
binned_average_meth_no_missing += [binned_average_meth[i]]
else:
meth_before = (binned_average_meth[i-1]
if not i == 0
else "NA")
meth_after = (binned_average_meth[i+1]
if not i == len(binned_average_meth)-1
else "NA")
average_list = [ j
for j
in [meth_before, meth_after]
if not j == "NA" ]
binned_average_meth_no_missing += [ (float(sum(average_list))/
float(len(average_list)))
if len(average_list) > 0
else 0. ]
binned_average_meth = binned_average_meth_no_missing
# Plot average methylation values per bin
# Define Colormap
cmap = cm.bwr
norm = matplotlib.colors.Normalize(vmin=0., vmax=1.)
m = matplotlib.cm.ScalarMappable(norm = norm, cmap = cmap)
for cbin in range(len(binned_average_meth)):
rect = Rectangle((start+cbin*bin_size, 0),
bin_size,
1,
color=m.to_rgba(binned_average_meth[cbin]))
ax.add_patch(rect)
plt.xlim([start, end])
plt.ylim([0, 1])
plt.xticks([], [])
plt.yticks([], [])
def plotMethylationProfile(meth_calls,
chrom,
start,
end,
color="k",
ax=None):
'''Function that plots methylation values as dot plots.
:param meth_calls: Iterator containing list-like elements with the following
entries:
            1. Chromosome
            2. Start position
            3. End position
            4. Number of methylated cytosines
            5. Number of unmethylated cytosines
        Or
            1. Chromosome
            2. Start position
            3. End position
            4. Beta value
    :type meth_calls: iterator
:param chrom: Chromosome of region to be plotted.
:type chrom: str
:param start: Start position of region to be plotted.
:type start: int
:param end: End position of region to be plotted.
:type end: int
:param color: Color of points representing methylation values, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
n_entries = len(meth_calls[0])
if(n_entries == 5):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
[ float(m[3])/(float(m[3])+float(m[4]))
if not(float(m[3])+float(m[4]) == 0.)
else 0. for m in meth_calls],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
elif(n_entries == 4):
plt.plot([ (float(m[1])+float(m[2]))/2. for m in meth_calls ],
                 [ float(m[3]) for m in meth_calls ],
color=color,
marker=".",
linestyle='None',
markersize=1,
alpha=.5)
plt.ylim([0, 1])
plt.xticks([], [])
plt.xlim([start, end])
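
def _example_methylation_track(meth_bed, chrom, start, end):
    """Usage sketch added for illustration only (not called anywhere in this
    module): stack a dot-plot methylation track on top of a coordinate axis.
    `meth_bed` is assumed to be a :class:`pybedtools.BedTool` with the
    five-column layout described above.
    """
    fig, (ax_meth, ax_coord) = plt.subplots(2, 1, figsize=(8, 2), sharex=True)
    # The plotting helpers draw on the current axes, so select them explicitly.
    plt.sca(ax_meth)
    plotMethylationProfile(meth_bed, chrom, start, end, ax=ax_meth)
    plt.sca(ax_coord)
    plotCoordinates(chrom, start, end, ax=ax_coord, loc_coordinates="down")
    return fig
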
def plotTX(chrom_r,
start_r,
end_r,
TX_pos,
direction="right",
color="k",
ax=None):
'''Function that plots a translocation event as a bar, showing the part
of the genome that is translocated.
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Start position of the region to be plotted.
:type start_r: int
:param end_r: End position of the region to be plotted.
:type end_r: int
:param TX_pos: Position of the translocation.
:type TX_pos: int
    :param direction: Direction of the genomic part that is translocated. Either
        of "left" (upstream), or "right" (downstream), defaults to "right".
:type direction: str, optional
:param color: Color of the bar representing the translocation, defaults to
"k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
TX_start = TX_pos
TX_end = end_r
if(direction == "left"):
TX_start = start_r
TX_end = TX_pos
rect = Rectangle((TX_start, .4),
TX_end-TX_start,
.2,
color=color,
capstyle='butt',
linewidth=0)
ax.add_patch(rect)
plt.xlim([start_r, end_r])
plt.ylim([0.3, 0.7])
def plotRegions(regions,
start,
end,
color="#cbebc4",
edgecolor=False,
alpha=1,
ax = None):
    '''Function that plots genomic regions as simple rectangles.
:param regions: Iterator containig list-like elements with the following
entries:
1. Chromosome
2. Start position
3. End position
:type regions: iterator
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the rectangles representing the regions to be
plotted, defaults to "#cbebc4".
:type color: str, optional
    :param edgecolor: Color of the region edge. If False, no edge is plotted,
        defaults to False.
    :type edgecolor: str or bool, optional
:param alpha: Alpha value of the rectangle, representing the region to be
plotted, defaults to 1.
:type alpha: float, optional.
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
c = 0
for region in regions:
if(not edgecolor):
current_color = color
rect = Rectangle([int(region[1]), -.75],
int(region[2])-int(region[1]),
1.5,
facecolor=current_color,
edgecolor='none',
alpha=alpha)
c += 1
else:
current_color = color
rect = Rectangle([int(region[1]), -.75],
int(region[2])-int(region[1]),
1.5,
facecolor=current_color,
edgecolor=edgecolor,
alpha=alpha)
c += 1
ax.add_patch(rect)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start, end])
plt.ylim([-1, 1])
def plotMotifDirections(motifs_bed,
start,
end,
head_width=0.2,
head_length=1000,
overhang=0,
color_plus="#80b1d3",
color_minus="#fb8072",
ax=None):
'''Function that plots TF motifs as arrows, indicating their directionality.
:param motifs_bed: :class:`pybedtools.BedTool` object containing regions
of the TF sited to be plotted.
:type motifs_bed: :class:`pybedtools.BedTool`
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param head_width: Width of the arrow head as proportion of the arrow,
defaults to 0.2
:type head_width: float, optional
:param head_length: Length of the arrow in bp (depends on the region that
is plotted), defaults to 1000.
:type head_length: int, optional
:param overhang: Fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one. Defaults to 0.
:type overhang: float, optional
:param color_plus: Color of plus stranded TF regions, defaults to "#80b1d3".
:type color_plus: str, optional
    :param color_minus: Color of minus stranded TF regions, defaults to
"#fb8072".
:type color_minus: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
for motif in motifs_bed:
motif_start = int(motif[1])
motif_end = int(motif[2])
strand = str(motif[3])
arrow_start = motif_start
arrow_end = motif_end
color=color_plus
dx = head_length
if(strand == "-"):
arrow_start = motif_end
arrow_end = motif_start
color = color_minus
dx = -1.*head_length
plt.arrow(arrow_start,
.5,
dx,
0,
head_width=head_width,
head_length=head_length,
overhang=overhang,
head_starts_at_zero=False,
edgecolor="none",
facecolor=color,
length_includes_head=True)
plt.xlim([start, end])
plt.ylim([0.4, 0.6])
def plotHiCContactMap(contact_map,
start,
end,
segment_size,
cmap="Greys",
vmin=None,
vmax=None,
location="top",
ax=None):
'''Function that plots HiC contact maps as pyramid plots
:param contact_map: Matrix that contains the intensity values of HiC
contacts.
:type contact_map: :class:`pandas.DataFrame`
:param start: Chromosomal start position of region to be plotted.
:type start: int
:param end: Chromosomal end position of region to be plotted.
:type end: int
:param segment_size: Size of the segments for which contacts were called.
:type segment_size: int
:param cmap: Name of the colormap to be used for plotting HiC intensities,
defaults to "Greys".
:type cmap: str, optional
:param vmin: Minimal value of intensity range to be plotted, defaults to
None
:type vmin: float, optional
:param vmax: Maximal value of intensity range to be plotted, defaults to
None.
:type vmax: float, optional
:param location: Either of "top" | "bottom". If location == "top", the
pyramid points upwards, else if location == "bottom" the pyramid points
        downwards, defaults to "top".
:type location: str, optional
:param ax: Axis on which to plot contact map, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
    contact_map_index1 = int(start/segment_size)
    contact_map_index2 = int(end/segment_size)+1
sliced_contact_map = contact_map.iloc[contact_map_index1:contact_map_index2,
contact_map_index1:contact_map_index2]
if(vmin is None):
vmin = 0
if(vmax is None):
vmax = np.percentile(contact_map, 99.9)
colormap = plt.get_cmap(cmap)
for i in range(contact_map_index1, contact_map_index2):
y_range = (range(contact_map_index1+(i-contact_map_index1),
contact_map_index2)
if location == "top"
else range(contact_map_index1,
contact_map_index2-(contact_map_index2-i)))
for j in y_range:
# Define midpoint of rectangle
midpoint = (i*segment_size+(j*segment_size-i*segment_size)/2.,
(j*segment_size-i*segment_size)/2.)
vertices = [(midpoint[0]-segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]-segment_size/2.),
(midpoint[0]+segment_size/2., midpoint[1]),
(midpoint[0], midpoint[1]+segment_size/2.),
(midpoint[0]-segment_size/2., midpoint[1])
]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO,
Path.CLOSEPOLY,
]
path = Path(vertices, codes)
intensity_value = contact_map.iloc[i, j]
intensity_value = (intensity_value/vmax
if intensity_value <= vmax
else 1.)
facecolor = colormap(intensity_value)
patch = matplotlib.patches.PathPatch(path,
facecolor=facecolor,
edgecolor='none')
ax.add_patch(patch)
ax.set_xlim(start, end)
if(location == "top"):
ax.set_ylim(0, (end-start)/2.)
else:
ax.set_ylim(-1.*(end-start)/2., 0)
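
def _example_hic_pyramid(matrix_csv, start, end, segment_size=10000):
    """Usage sketch added for illustration only (not called anywhere in this
    module; the CSV path and bin size are placeholders): load a dense Hi-C
    matrix into a :class:`pandas.DataFrame` and draw it as an upward-pointing
    pyramid for the given region.
    """
    import pandas as pd
    contact_map = pd.read_csv(matrix_csv, index_col=0)
    fig, ax = plt.subplots(figsize=(8, 4))
    plotHiCContactMap(contact_map, start, end, segment_size,
                      cmap="Greys", location="top", ax=ax)
    return fig
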
def distanceEqualizer(genomic_segments,
start,
end,
direction="top_down",
color="k",
ax = None):
    '''Function that plots connecting lines mapping genomic segments from
        their original (unequally spaced) positions to equally spaced
        positions.
:param genomic_segments: List of segments for which distances shall be
equalized (each segment is of the form [<chrom>, <start>, <end>])
:type genomic_segments: list
:param start: Start position of the genomic region.
:type start: int
:param end: End position of the genomic region.
:type end: int
:param color: Color of lines equalizing distances, defaults to "k".
:type color: str, optional
:param direction: Direction of distance equalization (top_down | bottom_up),
defaults to "top_down".
:type direction: str, optional.
:param ax: Axis on which to plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: List of equalized region midpoints.
:rtype: list
'''
ax = ax if ax is not None else plt.gca()
# Calculate midpoints of original and distance equalized segments
n_segments = len(genomic_segments)
equalized_region_size = (end-start)
if(n_segments > 0):
equalized_region_size=(end-start)/n_segments
equalized_region_mid_points = []
for i in range(1, n_segments+1):
equalized_region_mid_points += [((start+
i*equalized_region_size)-
equalized_region_size/2)]
region_mid_points = []
for e in genomic_segments:
if(int(e[1]) < start):
region_mid_points += [start+(int(e[2])-start)/2]
elif(int(e[2]) > end):
region_mid_points += [int(e[1])+(end-int(e[1]))/2]
else:
region_mid_points += [int(e[1])+(int(e[2])-int(e[1]))/2]
for i in range(len(region_mid_points)):
region_mid_point = region_mid_points[i]
equalized_region_mid_point = equalized_region_mid_points[i]
codes = []
vertices = []
if(direction == "top_down"):
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 1),
(region_mid_point, .8),
(equalized_region_mid_point, .2),
(equalized_region_mid_point, 0)]
else:
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
vertices = [(region_mid_point, 0),
(region_mid_point, .2),
(equalized_region_mid_point, .8),
(equalized_region_mid_point, 1)]
path = Path(vertices, codes)
path_patch = PathPatch(path,
facecolor="none",
edgecolor=color,
linewidth=.5)
ax.add_patch(path_patch)
ax.axis("off")
plt.xlim([start, end])
plt.ylim([0, 1])
return equalized_region_mid_points
def plotCoordinates(chrom,
start,
end,
color="k",
ax = None,
upper=True,
loc_coordinates="up",
revert_coordinates=False,
rotation=0):
    '''Function that plots genomic coordinates along a linear axis.
:param chrom: Chromosome of the region to be plotted.
:type chrom: str
:param start: Start position of the region to be plotted.
:type start: int
:param end: End position of the region to be plotted.
:type end: int
:param color: Color of the genomic scales elements, defaults to "k".
:type color: str, optional
:param ax: Axis of plot, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
    :param upper: If True, use a coarser tick spacing (fewer ticks); if False,
        use a finer spacing (more ticks), defaults to True.
:type upper: bool, optional
:param loc_coordinates: Either of "up" | "down". If "up", plot ticks to
upper direction, else if "down", plot ticks to lower direction, defaults
to "up".
:type loc_coordinates: str, optional
:param revert_coordinates: If True, coordinates are reverted to decreasing
order. Else, coordinates stay in increasing order, defaults to False.
:type revert_coordinates: bool, optional
:param rotation: Rotational angle of coordinate strings, defaults to 0.
:type rotation: int, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
tick_size = 10**math.ceil((np.log10((end-start)/10)))
if(not upper):
tick_size = 10**int((np.log10((end-start)/10)))
# Determine first tick position
first_tick = start+(tick_size-start%tick_size)
ticks = []
current_tick = first_tick
while(current_tick <= end):
ticks += [current_tick]
current_tick = current_tick + tick_size
scale = None
if(first_tick > 1000000):
scale = "Mb"
else:
scale="Kb"
digits_to_round = None
divisor = None
if(scale == "Mb"):
digits_to_round = int(6-np.log10(tick_size))
divisor = 1000000
    else:
        digits_to_round = int(3-np.log10(tick_size))
        divisor = 1000
tick_labels = [ str(round(i/float(divisor), digits_to_round))+scale
for i in ticks ]
if(loc_coordinates == "up"):
plt.plot([start, end],
[0, 0],
linestyle="-",
color=color,
linewidth=1)
else:
plt.plot([start, end],
[0.3, 0.3],
linestyle="-",
color=color,
linewidth=1)
if(revert_coordinates):
ticks = [ start + end-i for i in ticks ]
ticks.reverse()
tick_labels.reverse()
print(tick_labels)
for i in range(len(ticks)):
if(loc_coordinates == "up"):
plt.plot([ticks[i], ticks[i]],
[0., .3],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
.4,
tick_labels[i],
horizontalalignment="center",
verticalalignment="bottom",
fontsize=5,
color=color,
rotation=rotation)
else:
plt.plot([ticks[i], ticks[i]],
[.3, .0],
linestyle="-",
color=color,
linewidth=1)
plt.text(ticks[i],
-.1,
tick_labels[i],
horizontalalignment="center",
fontsize=5,
color=color,
verticalalignment="top",
rotation=rotation)
plt.xlim([start, end])
plt.yticks([], [])
if(loc_coordinates == "up"):
plt.ylim([-.1, .8])
else:
plt.ylim([-1.5, .3])
plt.xticks([], [])
ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
def plotLinksAsArcs(links_bed,
chrom_r,
start_r,
end_r,
lw=1,
color="k",
ax = None):
'''Function that plots links between genomic regions as arcs.
:param links_bed: Iterator, that contains bed-like structured lists with the
following elements:
1. Chromosome region1
2. Start region1
3. End region1
4. Chromosome region2
5. Start region2
6. End region2
:type links_bed: iterator
:param chrom_r: Chromosome of the region to be plotted.
:type chrom_r: str
:param start_r: Chromosomal start position of the region to be plotted.
:type start_r: int
    :param end_r: Chromosomal end position of the region to be plotted.
:type end_r: int
:param color: Color of the arc, defaults to "k".
:type color: str, optional.
:param ax: Axis where the plot is drawn, defaults to None.
:type ax: :class:`matplotlib.axes._subplots.AxesSubplot`, optional
:return: Nothing to be returned.
:rtype: None
'''
ax = ax if ax is not None else plt.gca()
max_dist = 0
for e in links_bed:
link_pos1 = int(e[1])+(int(e[2])-int(e[1]))/2
link_pos2 = int(e[4])+(int(e[5])-int(e[4]))/2
distance = abs(link_pos2-link_pos1)
if(distance > max_dist):
max_dist = distance
mid_point = link_pos1 + (link_pos2-link_pos1)/2
        if(link_pos2 < link_pos1):
mid_point = link_pos2 + (link_pos1-link_pos2)/2
vertices = [(link_pos1, 0),
(mid_point, distance),
(link_pos2, 0)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
path = Path(vertices,
codes)
patch = PathPatch(path,
facecolor = "None",
edgecolor = color,
lw = lw)
ax.add_patch(patch)
#ax.spines["bottom"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
plt.xticks([], [])
plt.yticks([], [])
plt.xlim([start_r, end_r])
plt.ylim([0, max_dist/2])
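
def _example_region_overview(chip_bed, regions_bed, chrom, start, end):
    """Usage sketch added for illustration only (not called anywhere in this
    module): stack a ChIP signal track, a region track and a coordinate axis
    for a single locus. Both BedTool arguments and the 1 kb bar width are
    placeholders.
    """
    fig, axes = plt.subplots(3, 1, figsize=(8, 3),
                             gridspec_kw={"height_ratios": [3, 1, 1]})
    # The plotting helpers draw on the current axes, so select them explicitly.
    plt.sca(axes[0])
    plotChIPSignals(chip_bed, chrom, start, end, ax=axes[0], offset=1000)
    plt.sca(axes[1])
    plotRegions(regions_bed, start, end, ax=axes[1])
    plt.sca(axes[2])
    plotCoordinates(chrom, start, end, ax=axes[2], loc_coordinates="down")
    return fig
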
| 35.254447 | 80 | 0.545101 |
| hexsha: e010b163b6fbc347a75063de7760418370bb37d6 | size: 31,171 | ext: py | lang: Python | path: toc/fsa/fsa.py | repo: djrochford/toc | head: 934d19b4acda55a6d4610c8a91b1a6005ff7b683 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
"""
File containing DFA and NFA public classes
"""
import collections.abc
from itertools import product, chain, combinations
from string import printable
from typing import (
AbstractSet,
Container,
FrozenSet,
Iterable,
List,
Mapping,
MutableMapping,
Optional,
Set,
Tuple,
Union,
cast
)
from .base import (
_Base,
_extract_states_alphabet,
_error_message,
_good_alphabet,
_check_input
)
State = str
Symbol = str
Regex = str
FsaTransitionFunction = Mapping[
Tuple[State, Symbol], Union[State, AbstractSet[State]]
]
GnfaTransitionFunction = Mapping[Tuple[State, State], Regex]
MutableGnfaTF = MutableMapping[Tuple[State, State], Regex]
NfaTransitionFunction = Mapping[Tuple[State, Symbol], AbstractSet[State]]
MutableNfaTF = MutableMapping[Tuple[State, Symbol], Set[State]]
OPERATORS = ['sentinel', '|', '', '*']
PARENTHE = ['(', ')']
EMPTIES = ['', '']
NOT_SYMBOLS = OPERATORS + PARENTHE + EMPTIES
DfaTransitionFunction = Mapping[Tuple[State, Symbol], State]
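
# Illustrative sketch (not part of the original file): a transition function
# for a two-state DFA over the alphabet {"0", "1"} that accepts strings with
# an odd number of "1"s, written in the shape described by
# DfaTransitionFunction above.
_EXAMPLE_DFA_TRANSITIONS: DfaTransitionFunction = {
    ("q_even", "0"): "q_even", ("q_even", "1"): "q_odd",
    ("q_odd", "0"): "q_odd", ("q_odd", "1"): "q_even",
}
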
| 39.506971 | 91 | 0.595939 |
| hexsha: e0112c386596780c5a86b0d6086dc2f7b6c2be8a | size: 429 | ext: py | lang: Python | path: Numbers/Roman Number Generator/tests.py | licenses: ["MIT"] |
| stars: 10 | repo: fossabot/IdeaBag2-Solutions | head: 73b554d9796510fc86e5fc55016732aa866266c6 | 2018-07-06T22:05:45.000Z to 2021-05-22T11:29:04.000Z |
| issues: 22 | repo: jarik-marwede/IdeaBag2-Projects | head: c5fe9524ef03a6ebc098ab8aaee7448f5b877828 | 2018-07-13T17:16:43.000Z to 2022-01-11T11:16:08.000Z |
| forks: 1 | repo: jarik-marwede/IdeaBag2-Projects | head: c5fe9524ef03a6ebc098ab8aaee7448f5b877828 | 2020-06-13T18:53:51.000Z to 2020-06-13T18:53:51.000Z |
#!/usr/bin/env python3
import unittest
from roman_number_generator import arabic_to_roman
if __name__ == "__main__":
unittest.main()
| 23.833333 | 60 | 0.717949 |
e012a92e1f872614d01a6331fee5e35c430a31f7
| 261 |
py
|
Python
|
modules/moduleBase.py
|
saintaardvark/glouton-satnogs-data-downloader
|
dc8671340f558b1a21b41b9b04bab05fc15c7809
|
[
"MIT"
] | null | null | null |
modules/moduleBase.py
|
saintaardvark/glouton-satnogs-data-downloader
|
dc8671340f558b1a21b41b9b04bab05fc15c7809
|
[
"MIT"
] | null | null | null |
modules/moduleBase.py
|
saintaardvark/glouton-satnogs-data-downloader
|
dc8671340f558b1a21b41b9b04bab05fc15c7809
|
[
"MIT"
] | null | null | null |
from infrastructure.satnogClient import SatnogClient
import os
| 29 | 66 | 0.762452 |
| hexsha: e013ea72c2e27425fa2415a60a17282e347acbb7 | size: 45,537 | ext: py | lang: Python | path: oregano_plugins/fusion/server.py | repo: MrNaif2018/Oregano | head: cc08f813f9cbdb80d1ac607892f8439ec064ee04 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
#!/usr/bin/env python3
#
# Oregano - a lightweight Ergon client
# CashFusion - an advanced coin anonymizer
#
# Copyright (C) 2020 Mark B. Lundeberg
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
A basic server implementation for CashFusion. Does not natively offer SSL
support, however a server admin may run an SSL server proxy such as nginx for
that purpose.
"""
import secrets
import sys
import threading
import time
import traceback
from collections import defaultdict
import oregano.schnorr as schnorr
from oregano.address import Address
from oregano.util import PrintError, ServerError, TimeoutException
from . import fusion_pb2 as pb
from .comms import send_pb, recv_pb, ClientHandlerThread, GenericServer, get_current_genesis_hash
from .protocol import Protocol
from .util import (FusionError, sha256, calc_initial_hash, calc_round_hash, gen_keypair, tx_from_components,
rand_position)
from .validation import (check_playercommit, check_covert_component, validate_blame, ValidationError,
check_input_electrumx)
# Resistor "E series" values -- round numbers that are almost geometrically uniform
E6 = [1.0, 1.5, 2.2, 3.3, 4.7, 6.8]
E12 = [1.0, 1.2, 1.5, 1.8, 2.2, 2.7, 3.3, 3.9, 4.7, 5.6, 6.8, 8.2]
E24 = [1.0, 1.1, 1.2, 1.3, 1.5, 1.6, 1.8, 2.0, 2.2, 2.4, 2.7, 3.0, 3.3, 3.6, 3.9, 4.3, 4.7, 5.1, 5.6, 6.2, 6.8, 7.5, 8.2, 9.1]
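
# Illustrative sketch (not part of the original module): because each series
# is close to geometrically uniform, it can be expanded across decades into a
# ladder of round values. This is only an example of how such a ladder could
# be built; the exponent range is arbitrary.
#
#   example_ladder = [round(m * 10 ** k) for k in range(3, 9) for m in E12]
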
# TODO - make these configurable
# How long covert connections are allowed to stay open without activity.
# note this needs to consider the maximum interval between messages:
# - how long from first connection to last possible Tor component submission?
# - how long from one round's component submission to the next round's component submission?
COVERT_CLIENT_TIMEOUT = 40
# used for non-cryptographic purposes
import random
rng = random.Random()
rng.seed(secrets.token_bytes(32))
| 45.310448 | 174 | 0.599798 |
| hexsha: e014451ff2d26b3e408bb00a4f1a954adc75daa5 | size: 2,229 | ext: py | lang: Python | path: Excercici4Package/ex4.py | repo: jtorrenth/CienciaDades | head: 81f005ed1ddcc218dcde8c5e2f1a297444389a82 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
import matplotlib.pyplot as plt
# Function for exercise 4.4
| 31.842857 | 154 | 0.674742 |
| hexsha: e0161b99cffb06588c8cd2a39e9f07abf59540ea | size: 18,987 | ext: bzl | lang: Python | path: build/rules.bzl | repo: filmil/bazel-ebook | head: 433f1e157c6c1b7867abf72bc0e882c07477d60d | licenses: ["Apache-2.0"] |
| stars: 9 (2020-05-31T10:24:57.000Z to 2021-12-21T10:07:51.000Z) | issues: 2 (2021-11-09T23:25:01.000Z to 2021-11-10T08:42:22.000Z) | forks: 2 (2020-06-03T13:21:33.000Z to 2021-12-01T20:17:46.000Z) |
# Copyright (C) 2020 Google Inc.
#
# This file has been licensed under Apache 2.0 license. Please see the LICENSE
# file at the root of the repository.
# Build rules for building ebooks.
# This is the container
CONTAINER = "filipfilmar/ebook-buildenv:1.1"
# Use this for quick local runs.
#CONTAINER = "ebook-buildenv:local"
EbookInfo = provider(fields=["figures", "markdowns"])
# Returns the docker_run script invocation command based on the
# script path and its reference directory.
#
# Params:
# script_path: (string) The full path to the script to invoke
# dir_reference: (string) The path to a file used for figuring out
# the reference directories (build root and repo root).
drawtiming_png = rule(implementation = _drawtiming_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".t"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a timing diagram file into png using drawtiming",
)
neato_png = rule(implementation = _neato_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using neato",
)
dot_png = rule(implementation = _dot_png_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".dot"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform a graphviz dot file into png using dot",
)
asymptote = rule(implementation = _asymptote_impl,
attrs = {
"srcs": attr.label_list(
allow_files = [".asy"],
doc = "The file to compile",
),
"deps": attr.label_list(
doc = "The dependencies, any targets should be allowed",
),
"output": attr.output(doc="The generated file"),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Transform an asymptote file into png",
)
markdown_lib = rule(
implementation = _markdown_lib_impl,
doc = "Declares a set of markdown files",
attrs = {
"srcs": attr.label_list(
allow_files = [".md"],
doc = "The markdown source files",
),
"deps": attr.label_list(
doc = "The file to compile",
providers = [EbookInfo],
),
},
)
ebook_epub = rule(
implementation = _ebook_epub_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in EPUB format"
)
ebook_pdf = rule(
implementation = _ebook_pdf_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in PDF format"
)
ebook_kindle = rule(
implementation = _ebook_kindle_impl,
attrs = {
"deps": attr.label_list(
doc = "All the targets you need to make this book work.",
providers = [EbookInfo],
),
"title_yaml": attr.label(
allow_files = True,
doc = "The title.yaml file to use for this book",
),
"metadata_xml": attr.label(
allow_files = True,
doc = "The epub-metadata.xml file to use for this book",
),
"_script": attr.label(
default="//build:docker_run",
executable=True,
cfg="host"),
},
doc = "Generate an ebook in the Kindle's MOBI format"
)
| 32.126904 | 103 | 0.599357 |
e0164a1f4fee849a8bca46fb970244ecbfd603fe
| 715 |
py
|
Python
|
1094 EXPERIENCIAS.py
|
castrolimoeiro/Uri-exercise
|
7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb
|
[
"MIT"
] | null | null | null |
1094 EXPERIENCIAS.py
|
castrolimoeiro/Uri-exercise
|
7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb
|
[
"MIT"
] | null | null | null |
1094 EXPERIENCIAS.py
|
castrolimoeiro/Uri-exercise
|
7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb
|
[
"MIT"
] | null | null | null |
n = int(input())
coelho = rato = sapo = contador = 0
for i in range(0, n):
q, t = input().split(' ')
t = t.upper()
q = int(q)
if 1 <= q <= 15:
contador += q
if t == 'C':
coelho += q
elif t == 'R':
rato += q
elif t == 'S':
sapo += q
porccoelho = (coelho * 100) / contador
porcrato = (rato * 100) / contador
porcsapo = (sapo * 100) / contador
print(f'Total: {contador} cobaias')
print(f'Total de coelhos: {coelho}')
print(f'Total de ratos: {rato}')
print(f'Total de sapos: {sapo}')
print(f'Percentual de coelhos: {porccoelho:.2f} %')
print(f'Percentual de ratos: {porcrato:.2f} %')
print(f'Percentual de sapos: {porcsapo:.2f} %')
| 25.535714 | 51 | 0.544056 |
| hexsha: e016b94fa3454d62f7b448ca14631899dd78dc4c | size: 299 | ext: py | lang: Python | path: gravur/common/amountinput.py | repo: F483/gravur | head: 575c268d9ac28aa0ba00f1e5109bd74c3b7b69a5 | licenses: ["MIT"] |
| stars: 3 (2015-07-20T17:56:21.000Z to 2017-10-22T05:52:13.000Z) | issues: null | forks: null |
# coding: utf-8
# Copyright (c) 2015 Fabian Barkhau <[email protected]>
# License: MIT (see LICENSE file)
from kivy.uix.boxlayout import BoxLayout
from gravur.common.labelbox import LabelBox # NOQA
from gravur.utils import load_widget
| 21.357143 | 62 | 0.772575 |
| hexsha: e018a8edf8d16988caad3f9660a381b73b1f97c4 | size: 17,156 | ext: py | lang: Python | path: tibanna/top.py | repo: 4dn-dcic/tibanna | head: bb84597c425a481a230be30cb0ed9b99c774e53d | licenses: ["MIT"] |
| stars: 62 (2017-02-16T02:16:22.000Z to 2022-02-07T08:26:12.000Z) | issues: 77 (2017-10-26T20:17:35.000Z to 2022-03-25T22:56:32.000Z) | forks: 19 (2017-01-27T16:37:37.000Z to 2021-12-12T13:52:01.000Z) |
import datetime
class Process(object):
| 47.392265 | 148 | 0.6199 |
| hexsha: e01a18c1d0d2ecbc1fcb6159c9f9c87becb0c6cc | size: 1,458 | ext: py | lang: Python | path: venv/Lib/site-packages/zmq/tests/test_draft.py | repo: ajayiagbebaku/NFL-Model | head: afcc67a85ca7138c58c3334d45988ada2da158ed | licenses: ["MIT"] |
| stars: 603 (2020-12-23T13:49:32.000Z to 2022-03-31T23:38:03.000Z) | issues: 387 (2020-12-15T14:54:04.000Z to 2022-03-31T07:00:21.000Z) | forks: 35 (2021-03-26T03:12:04.000Z to 2022-03-23T10:15:10.000Z) |
# -*- coding: utf8 -*-
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
import os
import platform
import time
import pytest
import zmq
from zmq.tests import BaseZMQTestCase, skip_pypy
| 29.16 | 71 | 0.593964 |
| hexsha: e01cbf8a1a1ab981a1d993596c3a332451dcd74d | size: 367 | ext: py | lang: Python | repo: adoggie/Tibet.6 | head: 3c53060edafd80b9c4dafa10699a68d86a410c66 | licenses: ["MIT"] |
| stars: 22 | path: pythonlibs/mantis/templates/webapp/src/webapp/base.py | 2019-10-28T07:28:12.000Z to 2022-03-19T15:36:41.000Z |
| issues: 1 | path: AliceBackend/src/AliceBackend/base.py | 2019-11-07T04:54:14.000Z to 2019-11-07T07:12:48.000Z |
| forks: 13 | path: AliceBackend/src/AliceBackend/base.py | 2019-10-28T07:29:07.000Z to 2021-11-03T06:53:12.000Z |
#coding:utf-8
| 24.466667 | 80 | 0.643052 |
| hexsha: e01cd6185b052b2c9153c8eec135e9e3a2cf7572 | size: 667 | ext: py | lang: Python | path: base/site-packages/django_qbe/urls.py | head: 342078a18363ac41d3c6b1ab29dbdd44fdb0b7b3 | licenses: ["Apache-2.0"] |
| stars: 285 | repo: edisonlz/fastor | 2019-12-23T09:50:21.000Z to 2021-12-08T09:08:49.000Z |
| issues: null | repo: jeckun/fastor |
| forks: 9 | repo: jeckun/fastor | 2019-12-23T12:59:25.000Z to 2022-03-15T05:12:11.000Z |
# -*- coding: utf-8 -*-
from django.conf.urls.defaults import patterns, url
from django_qbe.exports import formats
urlpatterns = patterns('django_qbe.views',
url(r'^$', 'qbe_form', name="qbe_form"),
url(r'^js/$', 'qbe_js', name="qbe_js"),
url(r'^results/bookmark/$',
'qbe_bookmark', name="qbe_bookmark"),
url(r'^results/export/(?P<format>(%s))/$' % "|".join(formats.keys()),
'qbe_export', name="qbe_export"),
url(r'^results/proxy/$',
'qbe_proxy', name="qbe_proxy"),
url(r'^results/(?P<query_hash>(.*))/$',
'qbe_results', name="qbe_results"),
url(r'^auto/$', 'qbe_autocomplete', name="qbe_autocomplete"),
)
| 37.055556 | 73 | 0.611694 |
| hexsha: e01d041f8b5c1564d154529462e58e50b56f4910 | size: 5,264 | ext: py | lang: Python | licenses: ["Apache-2.0"] |
| stars: 217 | path: augment.py | repo: docongminh/Text-Image-Augmentation-python | head: da27e8346ce2339f801335923faf7b14e026fd90 | 2020-02-09T07:44:18.000Z to 2022-03-24T03:52:51.000Z |
| issues: 5 | path: ocraug/augment.py | repo: lzmisscc/Text-Image-Augmentation-python | head: 12f104452e939444eb0fd4ac96143b78d091845b | 2020-03-23T02:24:33.000Z to 2022-03-13T07:02:04.000Z |
| forks: 42 | path: ocraug/augment.py | repo: lzmisscc/Text-Image-Augmentation-python | head: 12f104452e939444eb0fd4ac96143b78d091845b | 2020-02-10T06:42:31.000Z to 2022-03-13T11:54:18.000Z |
# -*- coding:utf-8 -*-
# Author: RubanSeven
# import cv2
import numpy as np
# from transform import get_perspective_transform, warp_perspective
from warp_mls import WarpMLS
# def distort(src, segment):
# img_h, img_w = src.shape[:2]
# dst = np.zeros_like(src, dtype=np.uint8)
#
# cut = img_w // segment
# thresh = img_h // 8
#
# src_pts = list()
# # dst_pts = list()
#
# src_pts.append([-np.random.randint(thresh), -np.random.randint(thresh)])
# src_pts.append([-np.random.randint(thresh), img_h + np.random.randint(thresh)])
#
# # dst_pts.append([0, 0])
# # dst_pts.append([0, img_h])
# dst_box = np.array([[0, 0], [0, img_h], [cut, 0], [cut, img_h]], dtype=np.float32)
#
# half_thresh = thresh * 0.5
#
# for cut_idx in np.arange(1, segment, 1):
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([cut * cut_idx + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
#
# # dst_pts.append([cut * i, 0])
# # dst_pts.append([cut * i, img_h])
#
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # print(mat)
# # dst[:, cut * (cut_idx - 1):cut * cut_idx] = cv2.warpPerspective(src, mat, (cut, img_h))
#
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (cut_idx - 1):cut * cut_idx] = warp_perspective(src, mat, (cut, img_h))
# # print(mat)
#
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# np.random.randint(thresh) - half_thresh])
# src_pts.append([img_w + np.random.randint(thresh) - half_thresh,
# img_h + np.random.randint(thresh) - half_thresh])
# src_box = np.array(src_pts[-4:-2] + src_pts[-2:-1] + src_pts[-1:], dtype=np.float32)
#
# # mat = cv2.getPerspectiveTransform(src_box, dst_box)
# # dst[:, cut * (segment - 1):] = cv2.warpPerspective(src, mat, (img_w - cut * (segment - 1), img_h))
# mat = get_perspective_transform(dst_box, src_box)
# dst[:, cut * (segment - 1):] = warp_perspective(src, mat, (img_w - cut * (segment - 1), img_h))
#
# return dst
| 33.74359 | 107 | 0.586816 |
e01de102906f7a6f8c39855d08b6adaa53f5663c
| 1,347 |
py
|
Python
|
Graph/print all paths from two vertices in a directed graph.py
|
ikaushikpal/DS-450-python
|
9466f77fb9db9e6a5bb3f20aa89ba6332f49e848
|
[
"MIT"
] | 3 |
2021-06-28T12:04:19.000Z
|
2021-09-07T07:23:41.000Z
|
Graph/print all paths from two vertices in a directed graph.py
|
SupriyoDam/DS-450-python
|
5dc21ce61b3279e9bd9d6ef3ad236667227ca283
|
[
"MIT"
] | null | null | null |
Graph/print all paths from two vertices in a directed graph.py
|
SupriyoDam/DS-450-python
|
5dc21ce61b3279e9bd9d6ef3ad236667227ca283
|
[
"MIT"
] | 1 |
2021-06-28T15:42:55.000Z
|
2021-06-28T15:42:55.000Z
|
from collections import defaultdict
if __name__ == "__main__":
g = Graph()
g.addEdge("A", "B")
g.addEdge("B", "D")
g.addEdge("A", "D")
g.addEdge("C", "A")
g.addEdge("C", "B")
g.addEdge("A", "C")
paths = g.printAllPaths("A", "B")
print(paths)
| 28.659574 | 85 | 0.628062 |
| hexsha: e01e00717692398432049be3d51d551f012c222e | size: 1,958 | ext: py | lang: Python | path: tests/pipegeojson_test/test_pipegeojson.py | repo: kamyarrasta/berrl | head: 1cf2ba8194498ec8f80d2908399ad00f1e963d83 | licenses: ["Apache-2.0"] |
| stars: 1 (2016-03-04T18:30:48.000Z to 2016-03-04T18:30:48.000Z) | issues: null | forks: null |
# testing the output of pipegeojson against different input types
import berrl as bl
import itertools
# making line with csv file location
line1=bl.make_line('csvs/line_example.csv')
# making line with list
testlist=bl.read('csvs/line_example.csv')
line2=bl.make_line(testlist,list=True)
# testing each line geojson against each other
ind=0
for a,b in itertools.izip(line1,line2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0:
passing=0
else:
passing=1
# making points with csv file location
points1=bl.make_line('csvs/points_example.csv')
# making points with list
testlist=bl.read('csvs/points_example.csv')
points2=bl.make_line(testlist,list=True)
# testing each points geojson against each other
ind=0
for a,b in itertools.izip(points1,points2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# making blocks with csv file location
blocks1=bl.make_line('csvs/blocks_example.csv')
# making blocks with list
testlist=bl.read('csvs/blocks_example.csv')
blocks2=bl.make_line(testlist,list=True)
# testing each blocks geojson against each other
ind=0
for a,b in itertools.izip(blocks1,blocks2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# making polygon with csv file location
polygon1=bl.make_line('csvs/polygon_example.csv')
# making polygon with list
testlist=bl.read('csvs/polygon_example.csv')
polygon2=bl.make_line(testlist,list=True)
# testing each polygon geojson against each other
ind=0
for a,b in itertools.izip(polygon1,polygon2):
if not a==b:
ind=1
# carrying the passing of status down to the test for the rest
if ind==0 and passing==0:
passing=0
else:
passing=1
# printing output result
if passing==0:
print 'pipegeojson build passed'
else:
print 'pipegeojson build failed'
| 22.25 | 65 | 0.759959 |
| hexsha: e01f044aab30cbd5165bae297a319d57b579704e | size: 912 | ext: py | lang: Python | path: tierpsy/debugging/catch_infinite_loop.py | repo: mgh17/tierpsy-tracker | head: a18c06aa80a5fb22fd51563d82c639b520742777 | licenses: ["MIT"] |
| stars: 9 (2021-01-11T10:49:21.000Z to 2022-02-28T15:48:00.000Z) | issues: 18 (2020-05-08T15:43:08.000Z to 2022-03-23T10:19:24.000Z) | forks: 10 (2019-12-18T12:10:12.000Z to 2022-01-05T09:12:47.000Z) |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon May 8 16:19:07 2017
@author: ajaver
"""
import os
import cv2
import sys
import glob
import threading
from functools import partial
main_dir = '/Volumes/behavgenom_archive$/Celine/raw/'
fnames = glob.glob(os.path.join(main_dir, '**', '*.avi'))
fnames = [x for x in fnames if not x.endswith('_seg.avi')]
fnames = sorted(fnames)
all_threads = []
for ii, video_file in enumerate(fnames):
print(ii, video_file)
vid = cv2.VideoCapture(video_file)
vid.release()
t = threading.Thread(target = partial(get_and_release, video_file))
t.start()
all_threads.append((video_file, t))
| 21.714286 | 71 | 0.663377 |
e0201884251a727105b3a8b3946ca3bc3aefd73d
| 480 |
py
|
Python
|
devito/passes/iet/languages/C.py
|
guaacoelho/devito
|
7e0b873114675752c4a49ed9076ee5d52997833c
|
[
"MIT"
] | 199 |
2016-08-18T23:33:05.000Z
|
2019-12-24T07:08:48.000Z
|
devito/passes/iet/languages/C.py
|
guaacoelho/devito
|
7e0b873114675752c4a49ed9076ee5d52997833c
|
[
"MIT"
] | 949 |
2016-04-25T11:41:34.000Z
|
2019-12-27T10:43:40.000Z
|
devito/passes/iet/languages/C.py
|
guaacoelho/devito
|
7e0b873114675752c4a49ed9076ee5d52997833c
|
[
"MIT"
] | 78 |
2016-08-30T07:42:34.000Z
|
2019-12-13T20:34:45.000Z
|
from devito.ir import Call
from devito.passes.iet.definitions import DataManager
from devito.passes.iet.langbase import LangBB
__all__ = ['CBB', 'CDataManager']
| 21.818182 | 53 | 0.591667 |
| hexsha: e0215d4c222f248ad7105000615a748c88340354 | size: 2,026 | ext: py | lang: Python | path: tests/_test_image.py | repo: Freakwill/ell | head: 8aa510cefb5d63db35071820208971013fac154c | licenses: ["MIT"] | stars: null | issues: null | forks: null |
#!/usr/bin/env python3
"""Test methods about image process
Make sure the existance of the images
"""
from ell import *
import numpy as np
_filter = Filter.from_name('db4')
| 28.535211 | 109 | 0.605133 |
| hexsha: e02439282d17416800f4bfd8e050f404bc4d7706 | size: 5,991 | ext: py | lang: Python | path: donkeycar/parts/pytorch/torch_data.py | repo: adricl/donkeycar | head: 8eb2705ed4161c0d6a9cfd9c7b0a1c0ca5abaeef | licenses: ["MIT"] |
| stars: 1,100 (2017-01-18T16:08:33.000Z to 2018-11-04T00:42:54.000Z) | issues: 199 (2016-12-20T07:45:16.000Z to 2018-11-01T02:30:12.000Z) | forks: 521 (2017-01-10T21:53:24.000Z to 2018-11-01T18:17:52.000Z) |
# PyTorch
import torch
from torch.utils.data import IterableDataset, DataLoader
from donkeycar.utils import train_test_split
from donkeycar.parts.tub_v2 import Tub
from torchvision import transforms
from typing import List, Any
from donkeycar.pipeline.types import TubRecord, TubDataset
from donkeycar.pipeline.sequence import TubSequence
import pytorch_lightning as pl
def get_default_transform(for_video=False, for_inference=False, resize=True):
"""
Creates a default transform to work with torchvision models
Video transform:
All pre-trained models expect input images normalized in the same way,
i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W),
where H and W are expected to be 112, and T is a number of video frames
in a clip. The images have to be loaded in to a range of [0, 1] and
then normalized using mean = [0.43216, 0.394666, 0.37645] and
std = [0.22803, 0.22145, 0.216989].
"""
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
input_size = (224, 224)
if for_video:
mean = [0.43216, 0.394666, 0.37645]
std = [0.22803, 0.22145, 0.216989]
input_size = (112, 112)
transform_items = [
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std)
]
if resize:
transform_items.insert(0, transforms.Resize(input_size))
return transforms.Compose(transform_items)
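
# Usage sketch (illustrative only, not part of the original module; the image
# path is a placeholder): apply the default transform to a single PIL image
# and add a batch dimension before feeding it to a torchvision model.
#
#   from PIL import Image
#   transform = get_default_transform()
#   img = Image.open("frame_000001.jpg").convert("RGB")
#   batch = transform(img).unsqueeze(0)  # shape: (1, 3, 224, 224)
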
| 37.21118 | 95 | 0.633951 |
| hexsha: e0251d7f1fc5a3340792a778c923482bb49bcf13 | size: 316 | ext: py | lang: Python | path: lite/__init__.py | repo: CleverInsight/sparx-lite | head: 1b729e8d11292e9737d57e092ee8916999ab1338 | licenses: ["MIT"] | stars: null | issues: null | forks: null |
import os
from tornado.template import Template
__SNIPPET__ = os.path.join(os.path.dirname(os.path.abspath(__file__)), '_snippet')
| 31.6 | 86 | 0.686709 |
| hexsha: e025cd2fbcd0226b08e7474394109f24f199f13c | size: 3,857 | ext: py | lang: Python | path: homeassistant/components/sensor/hddtemp.py | licenses: ["Apache-2.0"] |
| stars: 2 | repo: mdonoughe/home-assistant | head: d9805160bc787146bff0c434fdcab995716f0f8c | 2020-02-20T18:47:55.000Z to 2021-11-09T11:33:28.000Z |
| issues: 1 | repo: mdonoughe/home-assistant | head: d9805160bc787146bff0c434fdcab995716f0f8c | 2021-02-08T20:56:06.000Z to 2021-02-08T20:56:06.000Z |
| forks: 1 | repo: diophung/home-assistant | head: a5aa1118937702ca8bec050614ee52dc14f8466b | 2020-11-21T09:37:47.000Z to 2020-11-21T09:37:47.000Z |
"""
Support for getting the disk temperature of a host.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.hddtemp/
"""
import logging
from datetime import timedelta
from telnetlib import Telnet
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_PORT, TEMP_CELSIUS, TEMP_FAHRENHEIT, CONF_DISKS)
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_DEVICE = 'device'
ATTR_MODEL = 'model'
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 7634
DEFAULT_NAME = 'HD Temperature'
DEFAULT_TIMEOUT = 5
SCAN_INTERVAL = timedelta(minutes=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_DISKS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the HDDTemp sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
disks = config.get(CONF_DISKS)
hddtemp = HddTempData(host, port)
hddtemp.update()
if hddtemp.data is None:
return False
if not disks:
disks = [next(iter(hddtemp.data)).split('|')[0]]
dev = []
for disk in disks:
if disk in hddtemp.data:
dev.append(HddTempSensor(name, disk, hddtemp))
add_devices(dev, True)
class HddTempData(object):
"""Get the latest data from HDDTemp and update the states."""
def __init__(self, host, port):
"""Initialize the data object."""
self.host = host
self.port = port
self.data = None
def update(self):
"""Get the latest data from HDDTemp running as daemon."""
try:
connection = Telnet(
host=self.host, port=self.port, timeout=DEFAULT_TIMEOUT)
data = connection.read_all().decode(
'ascii').lstrip('|').rstrip('|').split('||')
self.data = {data[i].split('|')[0]: data[i]
for i in range(0, len(data), 1)}
except ConnectionRefusedError:
_LOGGER.error(
"HDDTemp is not available at %s:%s", self.host, self.port)
self.data = None
| 29.219697 | 79 | 0.637283 |
e026ba13c5f7c12090e3dee6c5f9a4f65eca3bb7
| 1,402 |
py
|
Python
|
boomer.py
|
JohnnySn0w/BabbleBot
|
03a383b063e4f28049f27f8ec669f22767ed8a87
|
[
"MIT"
] | 1 |
2019-07-07T01:46:55.000Z
|
2019-07-07T01:46:55.000Z
|
boomer.py
|
JohnnySn0w/BabbleBot
|
03a383b063e4f28049f27f8ec669f22767ed8a87
|
[
"MIT"
] | 1 |
2019-07-26T18:34:02.000Z
|
2019-07-26T18:34:02.000Z
|
boomer.py
|
JohnnySn0w/BabbleBot
|
03a383b063e4f28049f27f8ec669f22767ed8a87
|
[
"MIT"
] | 1 |
2020-05-10T01:27:48.000Z
|
2020-05-10T01:27:48.000Z
|
import random
prefix = [
'Look at you! ',
'Bless ',
'Bless! ',
'I heard about that! ',
'Amen!',
'You and the kids doing alright?',
'Miss ya\'ll!'
]
suffix = [
'. Amen!',
'. God bless america',
'. God bless!',
' haha',
'. love ya!',
'. love ya\'ll!',
]
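
# Illustrative sketch (the original file continues beyond this excerpt; this
# is not its actual implementation): one way the two lists could be combined
# to wrap a message in a random prefix and suffix.
#
#   def boomerify(message):
#       return random.choice(prefix) + message + random.choice(suffix)
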
| 23.366667 | 76 | 0.53067 |
| hexsha: e026dd61a71f4c0236cf71cd04ff440228426371 | size: 1,303 | ext: py | lang: Python | path: bot/views.py | repo: eyobofficial/COVID-19-Mutual-Aid | head: 42d30ce95b0e9c717c5eda3ecaafea2812ec34f7 | licenses: ["MIT"] |
| stars: null | issues: 5 (2020-03-19T17:49:50.000Z to 2021-06-10T20:06:14.000Z) | forks: null |
import telegram
from django.conf import settings
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.generic import View
from django.views.decorators.csrf import csrf_exempt
from braces.views import CsrfExemptMixin
from rest_framework.authentication import BasicAuthentication
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from .bots import TelegramBot
from .models import TelegramUser as User
| 29.613636 | 61 | 0.692249 |
e026df5666a9c260f8a2d313e1edc3eee3cad4f7
| 9,053 |
py
|
Python
|
code/counterfactual_generative_networks-main/imagenet/train_cgn.py
|
dummyxyz1/re_counterfactual_generative
|
4dda8e17a1123a564d60be82c17e9589155fb2e2
|
[
"MIT"
] | null | null | null |
code/counterfactual_generative_networks-main/imagenet/train_cgn.py
|
dummyxyz1/re_counterfactual_generative
|
4dda8e17a1123a564d60be82c17e9589155fb2e2
|
[
"MIT"
] | null | null | null |
code/counterfactual_generative_networks-main/imagenet/train_cgn.py
|
dummyxyz1/re_counterfactual_generative
|
4dda8e17a1123a564d60be82c17e9589155fb2e2
|
[
"MIT"
] | null | null | null |
import os
from datetime import datetime
from os.path import join
import pathlib
from tqdm import tqdm
import argparse
import torch
from torch import nn, optim
from torch.autograd import Variable
import torchvision
from torchvision.transforms import Pad
from torchvision.utils import make_grid
import repackage
repackage.up()
from imagenet.models import CGN
from imagenet.config import get_cfg_defaults
from shared.losses import *
from utils import Optimizers
from inception_score import *
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--model_name', default='tmp',
help='Weights and samples will be saved under experiments/model_name')
parser.add_argument('--weights_path', default='',
help='provide path to continue training')
parser.add_argument('--sampled_fixed_noise', default=False, action='store_true',
help='If you want a different noise vector than provided in the repo')
parser.add_argument('--save_singles', default=False, action='store_true',
help='Save single images instead of sheets')
parser.add_argument('--truncation', type=float, default=1.0,
help='Truncation value for noise sampling')
parser.add_argument('--episodes', type=int, default=300,
help="We don't do dataloading, hence, one episode = one gradient update.")
parser.add_argument('--batch_sz', type=int, default=1,
help='Batch size, use in conjunciton with batch_acc')
parser.add_argument('--batch_acc', type=int, default=4000,
help='pseudo_batch_size = batch_acc*batch size')
parser.add_argument('--save_iter', type=int, default=4000,
help='Save samples/weights every n iter')
parser.add_argument('--log_losses', default=False, action='store_true',
help='Print out losses')
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg = merge_args_and_cfg(args, cfg)
print(cfg)
main(cfg)
| 37.720833 | 98 | 0.629515 |