max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5)
---|---|---|---|---|---|---|
antares/query_alerts.py | EquinoxOmega0/timedomain | 4 | 12790351 | import numpy as np
from datetime import datetime
from astropy.io import ascii
from astropy.time import Time
from argparse import ArgumentParser
from antares_client.search import search, download
def build_query(ra0, dec0, fov, date):
"""Generate a query (a Python dictionary) to submit to the ANTARES client.
Parameters
----------
ra0 : float or None
Central RA for object search, in deg.
dec0 : float or None
Central declination for object search, in deg.
fov : float
Side length of box for search, in deg.
date : str
Start date for search; format is YYYY-MM-DD.
Returns
-------
query : dict
An ElasticSearch dictionary.
"""
# Build up the query.
query = { 'query': { 'bool': { 'must': [] } } }
# desi_candidate_test data stream:
# snfilter_last_proc_status should have a string like "Locus has two or
# more detections and is in DESI brightness range. Triggering."
query['query']['bool']['must'].append(
{ 'match':{ 'properties.snfilter_last_proc_status': '*DESI*' } })
# Set up the declination search.
if dec0 is not None:
ddec = 0.5 * fov
# dra / cos(dec) ensures an equal-area search rectangle.
dra = 0.5*fov / np.cos(np.radians(dec0))
query['query']['bool']['must'].append(
{'range': {'dec':{ 'gte':dec0-ddec, 'lte':dec0+ddec, } } })
else:
dra = 0.5*fov
# Set up the RA search.
if ra0 is not None:
query['query']['bool']['must'].append(
{'range': {'ra':{ 'gte':(ra0-dra)%360., 'lte':(ra0+dra)%360., } } })
# Set up the cumulative date search.
if date is not None:
tobs = Time(date).mjd
query['query']['bool']['must'].append(
{'range': {'mjd':{ 'gte':tobs, } } })
return query
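# Illustrative output (values are assumptions, not from a real run): for
# build_query(ra0=30.0, dec0=10.0, fov=3.2, date='2021-01-01') the returned dict
# has roughly the shape
#
#   {'query': {'bool': {'must': [
#       {'match': {'properties.snfilter_last_proc_status': '*DESI*'}},
#       {'range': {'dec': {'gte': 8.4, 'lte': 11.6}}},
#       {'range': {'ra': {'gte': 28.38, 'lte': 31.62}}},   # approximate; widened by 1/cos(dec)
#       {'range': {'mjd': {'gte': 59215.0}}},
#   ]}}}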
if __name__ == '__main__':
today = datetime.today()
parser = ArgumentParser(description='Client API to query ANTARES alert DB')
parser.add_argument('--ra', default=None, type=float,
help='RA (J2000), in deg')
parser.add_argument('--dec', default=None, type=float,
help='Dec (J2000), in deg')
parser.add_argument('--tobs', default=datetime.today().strftime('%Y-%m-%d'),
help='Obs date [YYYY-MM-DD]')
args = parser.parse_args()
# Create query dict for ANTARES stream search.
query = build_query(ra0=args.ra, dec0=args.dec, fov=3.2, date=args.tobs)
print(query)
#result_set = search(query)
#print(result_set)
outfile = 'results_antares'
if args.ra is not None:
outfile = '{}_ra{:03.1f}'.format(outfile, args.ra)
if args.dec is not None:
outfile = '{}_dec{:03.1f}'.format(outfile, args.dec)
if args.tobs is not None:
outfile = '{}_{}'.format(outfile, args.tobs)
outfile += '.csv'
result_set = download(query, outfile, output_format='csv', decompress=True)
| 2.78125 | 3 |
MobileNetv2/1_pruning/src_code/mobilenetv2.py | aiiuii/AutoPruner | 19 | 12790352 | """
Creates a MobileNetV2 Model as defined in:
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>. (2018).
MobileNetV2: Inverted Residuals and Linear Bottlenecks
arXiv preprint arXiv:1801.04381.
import from https://github.com/tonylins/pytorch-mobilenet-v2
"""
import torch.nn as nn
import math
import torch
from . import my_op
__all__ = ['mobilenetv2']
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
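# Hand-checked examples (assumed inputs, not from the TF repo): the helper rounds to
# the nearest multiple of `divisor` but never drops more than 10% of `v`.
#   _make_divisible(30, 8)  -> 32   # rounds up to the nearest multiple of 8
#   _make_divisible(11, 8)  -> 16   # 8 would lose more than 10% of 11, so bump up one step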
def conv_3x3_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, block_id, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.expand_ratio = expand_ratio
self.identity = stride == 1 and inp == oup
self.ReLU = nn.ReLU6(inplace=True)
if expand_ratio == 1:
self.conv1 = nn.Conv2d(inp, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn2 = nn.BatchNorm2d(oup)
else:
self.block_id = block_id
self.activation_size_list = [112, 56, 56, 28, 28, 28, 14, 14, 14, 14, 14, 14, 14, 7, 7, 7]
self.AP = my_op.APLayer(hidden_dim, hidden_dim, activation_size=self.activation_size_list[block_id], max_ks=2,
layer_id=block_id)
# hidden layer of each block
# 96, 144, 144, 192, 192, 192, 384, 384, 384, 384, 576, 576, 576, 960, 960, 960]
self.conv1 = nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False)
self.bn1 = nn.BatchNorm2d(hidden_dim)
self.conv2 = nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False)
self.bn2 = nn.BatchNorm2d(hidden_dim)
self.conv3 = nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False)
self.bn3 = nn.BatchNorm2d(oup)
self.index_code = [] # the generated index code
self.scale_factor = 0.1
self.channel_index = [] # binary index code for evaluation
def forward(self, x):
output = x
if self.expand_ratio == 1:
x = self.ReLU(self.bn1(self.conv1(x)))
x = self.bn2(self.conv2(x))
else:
x = self.ReLU(self.bn1(self.conv1(x)))
x_scale = self.AP(x, self.scale_factor, self.channel_index)
self.index_code = x_scale
x = my_op.MyScale.apply(x, x_scale)
x = self.ReLU(self.bn2(self.conv2(x)))
x = my_op.MyScale.apply(x, x_scale)
x = self.bn3(self.conv3(x))
if self.identity:
return x + output
else:
return x
class MobileNetV2(nn.Module):
def __init__(self, model_path, num_classes=1000, width_mult=1.):
super(MobileNetV2, self).__init__()
# setting of inverted residual blocks
self.cfgs = [
# t, c, n, s
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
input_channel = _make_divisible(32 * width_mult, 4 if width_mult == 0.1 else 8)
layers = [conv_3x3_bn(3, input_channel, 2)]
# building inverted residual blocks
block = InvertedResidual
block_id = -1
for t, c, n, s in self.cfgs:
output_channel = _make_divisible(c * width_mult, 4 if width_mult == 0.1 else 8)
for i in range(n):
layers.append(block(block_id, input_channel, output_channel, s if i == 0 else 1, t))
input_channel = output_channel
block_id += 1
self.features = nn.Sequential(*layers)
# building last several layers
output_channel = _make_divisible(1280 * width_mult, 4 if width_mult == 0.1 else 8) if width_mult > 1.0 else 1280
self.conv = conv_1x1_bn(input_channel, output_channel)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(output_channel, num_classes)
self._initialize_weights(model_path)
def forward(self, x, scale_factor=1.0, channel_index=None):
self.set_scale_factor(scale_factor)
if not self.training:
self.set_channel_index(channel_index)
x = self.features(x)
x = self.conv(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
index_code = self.get_index_code()
return x, index_code
def set_scale_factor(self, scale_factor):
for item in self.features._modules:
if item == '0' or item == '1':
                continue  # skip the first two modules (stem conv and first block)
block = self.features._modules[item]
block.scale_factor = scale_factor
def set_channel_index(self, channel_index):
for item in self.features._modules:
if item == '0' or item == '1':
                continue  # skip the first two modules (stem conv and first block)
block = self.features._modules[item]
block.channel_index = channel_index
def get_index_code(self):
index_code = []
for item in self.features._modules:
if item == '0' or item == '1':
                continue  # skip the first two modules (stem conv and first block)
block = self.features._modules[item]
index_code.append(block.index_code)
return index_code
def _initialize_weights(self, model_path):
model_weight = torch.load(model_path)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
my_weight = self.state_dict()
my_keys = list(my_weight)
new_keys = []
for item in my_keys:
if 'AP' not in item:
new_keys.append(item)
for i, (k, v) in enumerate(model_weight.items()):
my_weight[new_keys[i]] = v
self.load_state_dict(my_weight)
def mobilenetv2(**kwargs):
"""
Constructs a MobileNet V2 model
"""
return MobileNetV2(**kwargs)
if __name__ == '__main__':
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
model_path = '/mnt/data3/luojh/project/6_CURL/Journal/pretrained_model/ImageNet/mobilenetv2_1.0-0c6065bc.pth'
model = MobileNetV2(model_path).cuda()
input = torch.zeros((1, 3, 224, 224)).cuda()
output = model(input)
a=1
| 3.109375 | 3 |
apps/iotdb_cloud_core/admin.py | JulianFeinauer/iotdb-cloud | 6 | 12790353 | from django.contrib import admin
from apps.iotdb_cloud_core.models import IoTDBRelease
admin.site.register(IoTDBRelease)
| 1.257813 | 1 |
mygmm.py | DaiLisen/Eye-gaze-Point-Detection-Modified-sec- | 0 | 12790354 | import numpy as np
def gaussian(x,mu,sigma):
temp = -np.square(x-mu)/(2*sigma)
return np.exp(temp)/(np.sqrt(2.0*np.pi*sigma)) # sigma = sigma^2
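# Quick sanity check (hand-computed, not part of the original file): with mu=0 and
# sigma=1 (here sigma is the variance), the density at the mean is 1/sqrt(2*pi).
#   gaussian(0.0, 0.0, 1.0)  # ~= 0.3989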
def e_step(data, phais, mus, sigmas):
Qs = []
    for i in range(len(data)):
        q = [phai*gaussian(data[i], mu, sigma) for phai, mu, sigma in zip(phais, mus, sigmas)]
        # print(i, data[i])
Qs.append(q)
Qs = np.array(Qs)
Qs = Qs / np.sum(Qs,axis=1).reshape(-1,1)
return Qs
def m_step(data, phais, mus, sigmas, Qs):
data = np.array(data)
gama_j = np.sum(Qs,axis=0)
new_phais = gama_j/len(data)
mu_temp = np.sum(Qs*(data.reshape(-1,1)),axis=0)
new_mus =mu_temp/gama_j
X_i_mu_j = np.square(np.array([data]).reshape(-1,1)-np.array([mus]))
new_sigmas = np.sum(Qs*X_i_mu_j,axis=0)/gama_j
return new_phais,new_mus,new_sigmas
def EM(data,k):
threshold = 1e-15
    phais = [1.0/k for i in range(k)]
    mus = [i for i in range(k)]
    sigmas = [1 for i in range(k)]
phais0, mus0, sigmas0=[0],[0],[0]
# while True:
# Qs = e_step(data,phais,mus,sigmas)
# phais, mus, sigmas= m_step(data,phais,mus,sigmas,Qs)
# L1= [x-y for x,y in zip(phais0,phais)]
# L2 = [x - y for x, y in zip(mus0, mus)]
# L3 = [x - y for x, y in zip(sigmas0, sigmas)]
# L= np.sum(np.abs(np.array(L1))) \
# + np.sum(np.abs(np.array(L2))) \
# + np.sum(np.abs(np.array(L3)))
# phais0, mus0, sigmas0=phais, mus, sigmas
# print phais, mus, sigmas
# if L<threshold:
# break
for i in range(100):
Qs = e_step(data,phais,mus,sigmas)
phais, mus, sigmas= m_step(data,phais,mus,sigmas,Qs)
L1 = [x-y for x,y in zip(phais0,phais)]
L2 = [x - y for x, y in zip(mus0, mus)]
L3 = [x - y for x, y in zip(sigmas0, sigmas)]
L= np.sum(np.abs(np.array(L1))) \
+ np.sum(np.abs(np.array(L2))) \
+ np.sum(np.abs(np.array(L3)))
        for j in range(k):
if phais[j]==0.0:
phais[j]=0.1e-50
if mus[j]==0.0:
mus[j]=0.1e-50
if sigmas[j]==0.0:
sigmas[j]=0.1e-50
phais0, mus0, sigmas0 = phais, mus, sigmas
#print phais, mus, sigmas
if L<threshold:
break
        if i == 99:  # loop finished without reaching the convergence threshold
            print("Time OUT")
            print(phais, mus, sigmas)
return phais, mus, sigmas | 2.765625 | 3 |
score_following_game/evaluation/evaluation.py | CPJKU/score_following_game | 43 | 12790355 |
import copy
import numpy as np
PXL2CM = 0.035277778
def print_formatted_stats(stats):
"""
Print formatted results for result tables
"""
print("& {:.2f} & {:.2f} & {:.2f} & {:.2f} \\\\" .format(np.mean(stats['tracked_until_end_ratio']),
np.mean(stats['global_tracking_ratio']),
np.mean(stats['alignment_errors_mean'])*PXL2CM,
np.mean(stats['alignment_errors_std'])*PXL2CM))
def compute_alignment_stats(evaluation_data):
"""
Compute alignment stats
"""
alignment_errors = []
tracking_ratios = []
tracked_until_end = 0
tracked_onsets = 0
total_onsets = 0
for date_entry in evaluation_data:
alignment_errors += date_entry['alignment_errors']
tracking_ratios.append(date_entry['onsets_tracked'] / float(date_entry['total_onsets']))
if date_entry['onsets_tracked'] == date_entry['total_onsets']:
tracked_until_end += 1
tracked_onsets += date_entry['onsets_tracked']
total_onsets += date_entry['total_onsets']
alignment_errors = np.asarray(alignment_errors)
abs_alignment_errors = np.abs(alignment_errors)
tracking_ratios = np.asarray(tracking_ratios)
ae_mean, ae_median, ae_std = -1, -1, -1
if len(abs_alignment_errors) > 0:
ae_mean = abs_alignment_errors.mean()
ae_median = np.median(abs_alignment_errors)
ae_std = abs_alignment_errors.std()
tracking_ratios_mean = tracking_ratios.mean()
tracked_to_end_ratio = tracked_until_end / float(len(evaluation_data))
global_tracking_ratio = float(tracked_onsets) / total_onsets
stats = dict()
stats['alignment_errors_mean'] = ae_mean
stats['alignment_errors_median'] = ae_median
stats['alignment_errors_std'] = ae_std
stats['tracking_ratios_mean'] = tracking_ratios_mean
stats['global_tracking_ratio'] = global_tracking_ratio
stats['tracked_until_end_ratio'] = tracked_to_end_ratio
return stats
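# Illustrative call (synthetic numbers, not results from the paper): a single song
# with all 10 onsets tracked and signed errors of +2 and -3 pixels gives
#   compute_alignment_stats([{'alignment_errors': [2, -3],
#                             'onsets_tracked': 10, 'total_onsets': 10}])
# -> alignment_errors_mean 2.5, global_tracking_ratio 1.0, tracked_until_end_ratio 1.0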
class Evaluator:
def __init__(self, make_env, evaluation_pools, config, trials=1, render_mode=None):
self.make_env = make_env
self.evaluation_pools = evaluation_pools
self.config = config
self.render_mode = render_mode
self.trials = trials
def _eval_pool(self, agent, pool, verbose):
pool.reset()
if verbose:
print(pool.get_current_song_name().ljust(60), end=" ")
env = self.make_env(pool, self.config, render_mode=self.render_mode)
alignment_errors = []
# get observations
episode_reward = 0
observation = env.reset()
onset_list = pool.get_current_song_onsets()
while True:
# choose action
action = agent.select_action(observation, train=False)
# perform step and observe
observation, reward, done, info = env.step(action)
episode_reward += reward
# keep alignment errors, only store tracking error if an onset occurs
if pool.curr_perf_frame in onset_list:
alignment_errors.append(pool.tracking_error())
if done:
break
# compute number of tracked onsets
onsets_tracked = np.sum(onset_list <= pool.curr_perf_frame)
song_data = {'alignment_errors': alignment_errors, 'onsets_tracked': onsets_tracked,
'total_onsets': len(onset_list)}
return song_data
def evaluate(self, agent, log_writer=None, log_step=0, verbose=False):
raise NotImplementedError
class PerformanceEvaluator(Evaluator):
def __init__(self, make_env, evaluation_pools, config, trials=1, render_mode=None):
Evaluator.__init__(self, make_env, evaluation_pools, config, trials, render_mode)
def evaluate(self, agent, log_writer=None, log_step=0, verbose=False):
mean_stats = None
for _ in range(self.trials):
evaluation_data = []
for pool in self.evaluation_pools:
song_data = self._eval_pool(agent, pool, verbose)
evaluation_data.append(song_data)
if verbose:
song_stats = compute_alignment_stats([song_data])
string = "tracking ratio: %.2f" % song_stats['global_tracking_ratio']
if song_stats['global_tracking_ratio'] == 1.0:
string += " +"
print(string)
# compute alignment stats
stats = compute_alignment_stats(evaluation_data)
stats['evaluation_data'] = evaluation_data
if mean_stats is None:
mean_stats = dict()
for key in stats.keys():
if key != "evaluation_data":
mean_stats[key] = []
for key in mean_stats.keys():
mean_stats[key].append(stats[key])
for key in mean_stats.keys():
mean_stats[key] = np.mean(mean_stats[key])
if log_writer is not None:
log_writer.add_scalar('eval/alignment_errors_mean', mean_stats['alignment_errors_mean'], log_step)
log_writer.add_scalar('eval/alignment_errors_median', mean_stats['alignment_errors_median'], log_step)
log_writer.add_scalar('eval/alignment_errors_std', mean_stats['alignment_errors_std'], log_step)
log_writer.add_scalar('eval/tracking_ratios_mean', mean_stats['tracking_ratios_mean'], log_step)
log_writer.add_scalar('eval/global_tracking_ratio', mean_stats['global_tracking_ratio'], log_step)
log_writer.add_scalar('eval/tracked_until_end_ratio', mean_stats['tracked_until_end_ratio'], log_step)
return mean_stats
class EmbeddingEvaluator(Evaluator):
def __init__(self, make_env, evaluation_pools, config, trials=1, render_mode=None):
Evaluator.__init__(self, make_env, evaluation_pools, config, trials, render_mode)
self.embedding = None
def store_embedding(self, module, input_, output_):
self.embedding = input_[0]
def register_hook(self, net):
embedding_layer = net._modules.get('policy_fc')
embedding_layer.register_forward_hook(self.store_embedding)
def _eval_pool(self, agent, pool, verbose):
self.register_hook(agent.model.net)
pool.reset()
if verbose:
print(pool.get_current_song_name())
env = self.make_env(pool, self.config, render_mode=self.render_mode)
plain_env = self.make_env(copy.deepcopy(pool), self.config, render_mode=self.render_mode)
while not hasattr(plain_env, 'rl_pool'):
plain_env = plain_env.env
plain_env.reset()
# get observations
observation = env.reset()
return_dicts = {'state': [],
'value': [],
'embedding': [],
'onsets_in_state': [],
'target_lost': [],
'song_name': [],
'tracking_error': [],
'speed': []}
# song_onsets = plain_env.rl_pool.curr_song.get_perf_onsets()
song_onsets = plain_env.rl_pool.curr_song.cur_perf['onsets_padded']
while True:
# choose action
action = agent.select_action(observation)
# perform step and observe
observation, reward, done, info = env.step(action)
cur_perf_frame = plain_env.rl_pool.curr_perf_frame
in_len = plain_env.rl_pool.perf_shape[-1]
onsets_in_input = len(list(filter(lambda o: cur_perf_frame-in_len <= o <= cur_perf_frame, song_onsets)))
# perform a step in the plain env to get the original observation
obs_org, r, d, _ = plain_env.step(action)
return_dicts['state'].append(obs_org)
return_dicts['value'].append(agent.predict_value(observation))
return_dicts['embedding'].append(self.embedding.cpu().data.numpy())
return_dicts['onsets_in_state'].append(onsets_in_input)
return_dicts['target_lost'].append(done)
return_dicts['song_name'].append(plain_env.rl_pool.curr_song.song_name)
return_dicts['tracking_error'].append(plain_env.rl_pool.tracking_error())
return_dicts['speed'].append(plain_env.rl_pool.sheet_speed)
if done:
break
tue = np.sum(song_onsets <= plain_env.rl_pool.curr_perf_frame) == len(song_onsets)
return_dicts['tue'] = [tue for _ in range(len(return_dicts['state']))]
return return_dicts
def evaluate(self, agent, log_writer=None, log_step=0, verbose=False):
return_dicts = {'state': [],
'value': [],
'embedding': [],
'onsets_in_state': [],
'tue': [],
'target_lost': [],
'song_name': [],
'tracking_error': [],
'speed': []}
for _ in range(self.trials):
for pool in self.evaluation_pools:
res = self._eval_pool(agent, pool, verbose)
return_dicts['state'].extend(res['state'])
return_dicts['value'].extend(res['value'])
return_dicts['embedding'].extend(res['embedding'])
return_dicts['onsets_in_state'].extend(res['onsets_in_state'])
return_dicts['tue'].extend(res['tue'])
return_dicts['target_lost'].extend(res['target_lost'])
return_dicts['song_name'].extend(res['song_name'])
return_dicts['tracking_error'].extend(res['tracking_error'])
return_dicts['speed'].extend(res['speed'])
return return_dicts
| 2.640625 | 3 |
submodules/datasets/datasets/human/parsing/mhp_v1.py | khy0809/WeightNet | 0 | 12790356 | from pathlib import Path
from torchvision.datasets import VisionDataset
import numpy as np
from PIL import Image
class MHPv1(VisionDataset):
"""
MHP dataset : Multi-Human Parsing
    V1 provides human parsing annotations only; v2 additionally includes pose.
https://github.com/ZhaoJ9014/Multi-Human-Parsing
or https://lv-mhp.github.io/
The MHP v1.0 dataset contains 4,980 images,
each with at least two persons (average is 3).
We randomly choose 980 images and their corresponding annotations as the testing set.
The rest form a training set of 3,000 images and a validation set of 1,000 images.
For each instance, 18 semantic categories are defined and annotated except for the
"background" category, i.e. “hat”, “hair”, “sunglasses”, “upper clothes”, “skirt”,
“pants”, “dress”, “belt”, “left shoe”, “right shoe”, “face”, “left leg”, “right leg”,
“left arm”, “right arm”, “bag”, “scarf” and “torso skin”.
Each instance has a complete set of annotations whenever the corresponding category
appears in the current image.
List of contents:
./images:
All images in the dataset.
./annotations
The segmentation annotation files corresponding to the images.
One image is corresponding to multiple annotation files with the same prefix, one file per person. In each annotation file, the label represents:
0: 'background',
1: 'hat',
2: 'hair',
3: 'sunglass',
4: 'upper-clothes',
5: 'skirt',
6: 'pants',
7: 'dress',
8: 'belt',
9: 'left-shoe',
10: 'right-shoe',
11: 'face',
12: 'left-leg',
13: 'right-leg',
14: 'left-arm',
15: 'right-arm',
16: 'bag',
17: 'scarf',
18: 'torso-skin',
./visualization.m
Matlab script to visualize the annotations
    ./train_list.txt (4,000 entries)
        The list of images for training and validation
    ./test_list.txt (980 entries)
        The list of images for testing
"""
root = '/data/public/rw/datasets/human/parsing/LV-MHP-v1'
category = ('__background__', 'hat', 'hair', 'sunglass', 'upper-clothes',
'skirt', 'pants', 'dress', 'belt', 'left-shoe',
'right-shoe', 'face', 'left-leg', 'right-leg', 'left-arm',
'right-arm', 'bag', 'scarf', 'torso-skin',)
def __init__(self, what='train', transforms=None, transform=None, target_transform=None, root=None):
root = root or MHPv1.root
super(MHPv1, self).__init__(root=root, transforms=transforms,
transform=transform, target_transform=target_transform)
assert what in ('train', 'test')
self.what = what
root = Path(root)
self.imagepath = root / 'images'
self.annopath = root / 'annotations'
fname = root / f'{what}_list.txt'
with open(fname, 'r') as f:
image_ids = [line.split('.jpg')[0] for line in f.readlines()]
self.image_ids = image_ids
def __len__(self):
return len(self.image_ids)
def __getitem__(self, index):
i = self.image_ids[index]
fname = self.imagepath / f'{i}.jpg'
image = Image.open(fname)
files = self.annopath.glob(f'{i}_*.png')
anno = [Image.open(f) for f in files]
return image, anno
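# Minimal usage sketch (the default dataset root above is assumed to exist; it is
# not shipped with this code):
#
#   ds = MHPv1(what='train')
#   img, annos = ds[0]                      # PIL image plus one mask per person
#   masks = [np.array(a) for a in annos]    # label values follow `category` above (0..18)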
| 2.890625 | 3 |
Code/spark_connector.py | EthanTGo/DataWarehouseProject | 0 | 12790357 | <filename>Code/spark_connector.py
from pyspark.sql import SparkSession
import pyspark.sql.functions as f
'''
To run, please make sure you have the appropriate Spark and Scala version
- For my packages: I have Scala version 2.12 and Spark version 3.2.0
- This is important as we need to configure the appropriate version
Then please run the following command:
spark-submit --packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.2.0 spark_connector.py
'''
spark = SparkSession \
.builder \
.appName("StructuredNetworkWordCount") \
.getOrCreate()
# Reduce log verbosity so the console is not flooded with INFO messages when running spark-submit
spark.sparkContext.setLogLevel("WARN")
inputDF = spark \
.readStream \
.format("kafka") \
.option("kafka.bootstrap.servers", "localhost:9092") \
.option("subscribe", "Twitter_Stream_Cleaned") \
.load()
wordDF = inputDF.select('value', 'timestamp').withColumn('word', f.explode(f.split(f.col('value'), ' ')))\
.withWatermark("timestamp", "1 seconds")\
.groupBy('timestamp','word')\
    .count()
concatDF = wordDF.withColumnRenamed('count', 'counter')
concatDF2 = concatDF.withColumn('value', f.concat(concatDF.word, concatDF.counter))
outputDf = concatDF2\
.writeStream \
.format("kafka").option("kafka.bootstrap.servers", "localhost:9092")\
.option("checkpointLocation", "checkpoint_kafka/") \
.option("topic", "WordCount") \
.start()\
.awaitTermination()
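# To sanity-check the output stream (standard Kafka CLI tool; the broker address is the
# same localhost:9092 assumed above):
#   bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic WordCount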
| 3.03125 | 3 |
laboratorios/laboratorio-5/coins.py | CristianGaleano04/ayed-2019-1 | 0 | 12790358 | from sys import stdin
def count(S, m, n):
tabla = [[0 for x in range(m)] for x in range(n+1)]
for i in range(m):
tabla[0][i] = 1
for i in range(1, n+1):
for j in range(m):
x = tabla[i - S[j]][j] if i-S[j] >= 0 else 0
y = tabla[i][j-1] if j >= 1 else 0
tabla[i][j] = x + y
return tabla[n][m-1]
def main():
n = int(stdin.readline().strip())
arr = list(map(int,stdin.readline().strip().split(',')))
m = len(arr)
print(count(arr, m, n))
main()
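# Worked example (assumed input, mirroring the expected stdin format):
#   n = 4 and coins [1, 2, 3]  ->  count([1, 2, 3], 3, 4) == 4
#   (the four combinations are 1+1+1+1, 1+1+2, 2+2 and 1+3)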
| 3.203125 | 3 |
UMLRT2Kiltera_MM/transformation_reduced/Himesis/HExitPoint2BProcDef_WhetherOrNotExitPtHasOutgoingTrans.py | levilucio/SyVOLT | 3 | 12790359 | <gh_stars>1-10
from core.himesis import Himesis
import cPickle as pickle
from uuid import UUID
class HExitPoint2BProcDef_WhetherOrNotExitPtHasOutgoingTrans(Himesis):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HExitPoint2BProcDef_WhetherOrNotExitPtHasOutgoingTrans.
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HExitPoint2BProcDef_WhetherOrNotExitPtHasOutgoingTrans, self).__init__(name='HExitPoint2BProcDef_WhetherOrNotExitPtHasOutgoingTrans', num_nodes=64, edges=[])
# Add the edges
self.add_edges([(3, 0), (0, 10), (2, 19), (19, 7), (7, 20), (20, 12), (7, 21), (21, 9), (9, 22), (22, 5), (39, 33), (33, 51), (40, 34), (34, 8), (41, 35), (35, 53), (42, 36), (36, 54), (43, 37), (37, 55), (44, 38), (38, 56), (7, 23), (23, 59), (12, 24), (24, 60), (5, 25), (25, 61), (2, 26), (26, 62), (9, 27), (27, 63), (4, 1), (1, 28), (1, 29), (1, 30), (1, 31), (1, 32), (8, 13), (13, 52), (8, 14), (14, 58), (28, 2), (2, 11), (6, 15), (15, 3), (6, 16), (16, 10), (3, 17), (17, 57), (10, 18), (18, 58), (11, 3), (6, 4), (32, 5), (39, 45), (40, 46), (41, 47), (42, 48), (43, 49), (44, 50), (29, 7), (30, 12), (31, 9), (45, 57), (46, 59), (47, 60), (48, 61), (49, 62), (50, 63)])
# Set the graph attributes
self["mm__"] = pickle.loads("""(lp1
S'UMLRT2Kiltera_MM'
p2
a.""")
self["name"] = """ExitPoint2BProcDef_WhetherOrNotExitPtHasOutgoingTrans"""
self["GUID__"] = UUID('9a5d8e88-0fdd-4097-9880-99d97ae786f8')
# Set the node attributes
self.vs[0]["associationType"] = """exitPoints"""
self.vs[0]["mm__"] = """directLink_S"""
self.vs[0]["GUID__"] = UUID('e5d4918f-5b4c-43e4-85b9-d102cc9864da')
self.vs[1]["mm__"] = """ApplyModel"""
self.vs[1]["GUID__"] = UUID('c1bd8725-071e-40c8-ab6d-a32fed65a6bd')
self.vs[2]["name"] = """localdef1"""
self.vs[2]["classtype"] = """LocalDef"""
self.vs[2]["mm__"] = """LocalDef"""
self.vs[2]["cardinality"] = """1"""
self.vs[2]["GUID__"] = UUID('8ac072f6-5da2-49b3-a0fe-0edbc84fbb79')
self.vs[3]["name"] = """state1"""
self.vs[3]["classtype"] = """State"""
self.vs[3]["mm__"] = """State"""
self.vs[3]["cardinality"] = """+"""
self.vs[3]["GUID__"] = UUID('3fb586b2-c58f-4a53-83b9-01c06ab1e45d')
self.vs[4]["mm__"] = """paired_with"""
self.vs[4]["GUID__"] = UUID('cc03af79-ed8e-40df-903a-7863def6ab5e')
self.vs[5]["name"] = """triggerT1"""
self.vs[5]["classtype"] = """Trigger_T"""
self.vs[5]["mm__"] = """Trigger_T"""
self.vs[5]["cardinality"] = """1"""
self.vs[5]["GUID__"] = UUID('57b80b6d-ba84-4d17-9fb9-eb5e5da69a0f')
self.vs[6]["mm__"] = """MatchModel"""
self.vs[6]["GUID__"] = UUID('17dc0457-f713-49be-bec8-3bfe3a6e4cd7')
self.vs[7]["name"] = """procdef1"""
self.vs[7]["classtype"] = """ProcDef"""
self.vs[7]["mm__"] = """ProcDef"""
self.vs[7]["cardinality"] = """1"""
self.vs[7]["GUID__"] = UUID('441ecffe-46e9-4eb1-8fff-48bfa3a0bc6c')
self.vs[8]["name"] = """concat1"""
self.vs[8]["mm__"] = """Concat"""
self.vs[8]["Type"] = """'String'"""
self.vs[8]["GUID__"] = UUID('bcbbeeab-4acd-43bf-91a2-f41784257055')
self.vs[9]["name"] = """par1"""
self.vs[9]["classtype"] = """Par"""
self.vs[9]["mm__"] = """Par"""
self.vs[9]["cardinality"] = """1"""
self.vs[9]["GUID__"] = UUID('17383f55-8caf-4770-8c3d-0655d59922f9')
self.vs[10]["name"] = """exitpoint1"""
self.vs[10]["classtype"] = """ExitPoint"""
self.vs[10]["mm__"] = """ExitPoint"""
self.vs[10]["cardinality"] = """+"""
self.vs[10]["GUID__"] = UUID('af91e5a2-67c4-4b0a-b528-66315fa1f67a')
self.vs[11]["type"] = """ruleDef"""
self.vs[11]["mm__"] = """backward_link"""
self.vs[11]["GUID__"] = UUID('f4eca68b-0262-4d69-a6da-678cf1a57853')
self.vs[12]["name"] = """name1"""
self.vs[12]["classtype"] = """Name"""
self.vs[12]["mm__"] = """Name"""
self.vs[12]["cardinality"] = """1"""
self.vs[12]["GUID__"] = UUID('935df064-d28f-4545-a4e1-4c5574a1ffe1')
self.vs[13]["mm__"] = """hasArgs"""
self.vs[13]["GUID__"] = UUID('10114e57-be9c-4a17-ac52-dc79d7141380')
self.vs[14]["mm__"] = """hasArgs"""
self.vs[14]["GUID__"] = UUID('7de42cea-8467-424e-8a88-2b8e1adf592e')
self.vs[15]["mm__"] = """match_contains"""
self.vs[15]["GUID__"] = UUID('70a122f5-4130-4a3f-802b-93f2bab1699a')
self.vs[16]["mm__"] = """match_contains"""
self.vs[16]["GUID__"] = UUID('be1e450e-d08a-429c-8469-7d0e8df105e1')
self.vs[17]["mm__"] = """hasAttribute_S"""
self.vs[17]["GUID__"] = UUID('ef987f62-e5dc-432b-ae8d-c4581462b5e0')
self.vs[18]["mm__"] = """hasAttribute_S"""
self.vs[18]["GUID__"] = UUID('263ba6a8-158c-44b2-a356-b50b1c26cf07')
self.vs[19]["associationType"] = """def"""
self.vs[19]["mm__"] = """directLink_T"""
self.vs[19]["GUID__"] = UUID('940cde40-c2de-4ccb-904f-e1e43b4a7768')
self.vs[20]["associationType"] = """channelNames"""
self.vs[20]["mm__"] = """directLink_T"""
self.vs[20]["GUID__"] = UUID('26aa638a-5489-423a-b896-641f036da3af')
self.vs[21]["associationType"] = """p"""
self.vs[21]["mm__"] = """directLink_T"""
self.vs[21]["GUID__"] = UUID('02fc4d15-b9f1-41b4-97f2-6d2554ffa689')
self.vs[22]["associationType"] = """p"""
self.vs[22]["mm__"] = """directLink_T"""
self.vs[22]["GUID__"] = UUID('0ff7da8c-f4c3-49a5-8199-4caf67d8f015')
self.vs[23]["mm__"] = """hasAttribute_T"""
self.vs[23]["GUID__"] = UUID('3254c8a6-ddc9-450a-b129-c6fb0f2efd1e')
self.vs[24]["mm__"] = """hasAttribute_T"""
self.vs[24]["GUID__"] = UUID('283e1252-32f7-4374-91a0-eecc4f052f58')
self.vs[25]["mm__"] = """hasAttribute_T"""
self.vs[25]["GUID__"] = UUID('d50460df-9db5-4cb2-9acc-0a07c5cdd52e')
self.vs[26]["mm__"] = """hasAttribute_T"""
self.vs[26]["GUID__"] = UUID('27886f57-6681-4071-9060-5929041d2ff6')
self.vs[27]["mm__"] = """hasAttribute_T"""
self.vs[27]["GUID__"] = UUID('b3028bc6-a6ca-4a83-996a-ce14ca07f938')
self.vs[28]["mm__"] = """apply_contains"""
self.vs[28]["GUID__"] = UUID('c1aec743-36dc-470d-abfd-eade12295dc6')
self.vs[29]["mm__"] = """apply_contains"""
self.vs[29]["GUID__"] = UUID('89c851cc-f7e6-4dc9-a86f-0a4baacc6051')
self.vs[30]["mm__"] = """apply_contains"""
self.vs[30]["GUID__"] = UUID('7d974bb3-d836-4df7-8a23-33d3247e8c1c')
self.vs[31]["mm__"] = """apply_contains"""
self.vs[31]["GUID__"] = UUID('e23a4608-f92b-4e41-aa37-720ef5664425')
self.vs[32]["mm__"] = """apply_contains"""
self.vs[32]["GUID__"] = UUID('abc73ad4-af9d-4f49-af48-9d925af0c8ca')
self.vs[33]["mm__"] = """rightExpr"""
self.vs[33]["GUID__"] = UUID('54875937-3bca-4836-8a48-54391648c336')
self.vs[34]["mm__"] = """rightExpr"""
self.vs[34]["GUID__"] = UUID('4d1b1494-4a67-4145-9082-c7c7d71d8379')
self.vs[35]["mm__"] = """rightExpr"""
self.vs[35]["GUID__"] = UUID('1e4f92d5-732d-4e97-8d09-e4676d1ce6a4')
self.vs[36]["mm__"] = """rightExpr"""
self.vs[36]["GUID__"] = UUID('8ac35cd5-e46e-4041-bbdd-3b7041fbfd28')
self.vs[37]["mm__"] = """rightExpr"""
self.vs[37]["GUID__"] = UUID('c39d81e8-4156-4573-ab09-7d01e79ab104')
self.vs[38]["mm__"] = """rightExpr"""
self.vs[38]["GUID__"] = UUID('68b3e83f-a7a3-4d3b-bf3f-0f2806f2f184')
self.vs[39]["name"] = """eq1"""
self.vs[39]["mm__"] = """Equation"""
self.vs[39]["GUID__"] = UUID('d906a172-b00a-4525-a678-b5b18f359c5f')
self.vs[40]["name"] = """eq2"""
self.vs[40]["mm__"] = """Equation"""
self.vs[40]["GUID__"] = UUID('4357d276-109b-4e9f-a92d-5321e76de0d6')
self.vs[41]["name"] = """eq3"""
self.vs[41]["mm__"] = """Equation"""
self.vs[41]["GUID__"] = UUID('baad9574-0b56-4e62-b7cf-2a00fc9cb97c')
self.vs[42]["name"] = """eq4"""
self.vs[42]["mm__"] = """Equation"""
self.vs[42]["GUID__"] = UUID('9215c223-9f38-4d4c-9cbe-84632295ccb3')
self.vs[43]["name"] = """eq5"""
self.vs[43]["mm__"] = """Equation"""
self.vs[43]["GUID__"] = UUID('6ef66628-5bbc-421f-9143-da57c8ae2b3a')
self.vs[44]["name"] = """eq6"""
self.vs[44]["mm__"] = """Equation"""
self.vs[44]["GUID__"] = UUID('b306bebe-0c24-43c7-9aa4-03adb32cfa6c')
self.vs[45]["mm__"] = """leftExpr"""
self.vs[45]["GUID__"] = UUID('078ee05e-91ae-41c5-85dd-39063a9ffb2c')
self.vs[46]["mm__"] = """leftExpr"""
self.vs[46]["GUID__"] = UUID('363fa974-a805-48f4-9847-e0b2ec143b95')
self.vs[47]["mm__"] = """leftExpr"""
self.vs[47]["GUID__"] = UUID('6bd64e10-bbce-49eb-8182-90791435a253')
self.vs[48]["mm__"] = """leftExpr"""
self.vs[48]["GUID__"] = UUID('74c5bf8d-4dde-43f2-828f-c65199149079')
self.vs[49]["mm__"] = """leftExpr"""
self.vs[49]["GUID__"] = UUID('bcbf3c36-6a5b-4104-b8c0-81c1d0cb5a8d')
self.vs[50]["mm__"] = """leftExpr"""
self.vs[50]["GUID__"] = UUID('f00f4772-96b0-4b17-8f17-2aff80d8791f')
self.vs[51]["name"] = """true"""
self.vs[51]["mm__"] = """Constant"""
self.vs[51]["Type"] = """'Bool'"""
self.vs[51]["GUID__"] = UUID('aa093a42-3a6f-4922-990e-2c6330548f18')
self.vs[52]["name"] = """B"""
self.vs[52]["mm__"] = """Constant"""
self.vs[52]["Type"] = """'String'"""
self.vs[52]["GUID__"] = UUID('6f0c44af-0d82-4815-b6ee-add57e2868f9')
self.vs[53]["name"] = """sh_in"""
self.vs[53]["mm__"] = """Constant"""
self.vs[53]["Type"] = """'String'"""
self.vs[53]["GUID__"] = UUID('dd1070e6-dcb4-4a88-8080-d80a17920742')
self.vs[54]["name"] = """sh_in"""
self.vs[54]["mm__"] = """Constant"""
self.vs[54]["Type"] = """'String'"""
self.vs[54]["GUID__"] = UUID('cb3a80a8-be67-4725-8b8a-054291eee685')
self.vs[55]["name"] = """localdefcompstate"""
self.vs[55]["mm__"] = """Constant"""
self.vs[55]["Type"] = """'String'"""
self.vs[55]["GUID__"] = UUID('97a1b5a3-fad3-44ed-921f-00e4a80f0eab')
self.vs[56]["name"] = """parexitpoint"""
self.vs[56]["mm__"] = """Constant"""
self.vs[56]["Type"] = """'String'"""
self.vs[56]["GUID__"] = UUID('c4ff3de8-2fad-45ba-b68a-f56ee7645390')
self.vs[57]["name"] = """isComposite"""
self.vs[57]["mm__"] = """Attribute"""
self.vs[57]["Type"] = """'Bool'"""
self.vs[57]["GUID__"] = UUID('cf5d178a-8ede-4d08-8e9b-624d1c7dd450')
self.vs[58]["name"] = """name"""
self.vs[58]["mm__"] = """Attribute"""
self.vs[58]["Type"] = """'String'"""
self.vs[58]["GUID__"] = UUID('7a794fe1-5113-43ca-a4dd-f38021fa612b')
self.vs[59]["name"] = """name"""
self.vs[59]["mm__"] = """Attribute"""
self.vs[59]["Type"] = """'String'"""
self.vs[59]["GUID__"] = UUID('9bc9d43e-ec8d-4e41-9533-9cbac2c5f30e')
self.vs[60]["name"] = """literal"""
self.vs[60]["mm__"] = """Attribute"""
self.vs[60]["Type"] = """'String'"""
self.vs[60]["GUID__"] = UUID('99fdc4b3-4793-4cec-8fab-bb50911c7677')
self.vs[61]["name"] = """channel"""
self.vs[61]["mm__"] = """Attribute"""
self.vs[61]["Type"] = """'String'"""
self.vs[61]["GUID__"] = UUID('6182d17d-3b01-4123-878c-20974483cf0d')
self.vs[62]["name"] = """pivot"""
self.vs[62]["mm__"] = """Attribute"""
self.vs[62]["Type"] = """'String'"""
self.vs[62]["GUID__"] = UUID('30ce1a1c-946d-498c-be97-deeced6db219')
self.vs[63]["name"] = """pivot"""
self.vs[63]["mm__"] = """Attribute"""
self.vs[63]["Type"] = """'String'"""
self.vs[63]["GUID__"] = UUID('89268a57-1bee-4910-abb8-5ae26d960406')
| 2.390625 | 2 |
videoSummarizer/summarize/models.py | talsperre/LectureSummarizer | 4 | 12790360 | from django.db import models
from django.utils import timezone
from django.forms import ModelForm
from django.contrib.auth.models import User
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.exceptions import ValidationError
import secrets
import os
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
email_confirmed = models.BooleanField(default=False)
email = models.EmailField(max_length=254, default = "")
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
def file_size(value):
limit = 100 * 1024 * 1024
print("Value Size: ", value.size)
if value.size > limit:
raise ValidationError('File too large. Size should not exceed 100 MB.')
def user_directory_path(instance, filename):
return os.path.join('videos', secrets.token_urlsafe(64) + '.mp4')
class Video(models.Model):
UserID = models.ForeignKey(User, on_delete=models.CASCADE, default=1)
VideoPath = models.FileField(upload_to=user_directory_path, validators=[file_size], null=True, verbose_name="",)
Name = models.CharField(max_length=400)
def __str__(self):
return "Video: " + str(self.VideoPath)
class Split(models.Model):
SplitPath = models.CharField(max_length=400)
def __str__(self):
return str(str(self.id) + ":" + self.SplitPath)
class VideoSplit(models.Model):
VideoID = models.ForeignKey(Video, on_delete=models.CASCADE)
SplitID = models.ForeignKey(Split, on_delete=models.CASCADE)
def __str__(self):
return str(self.VideoID)
class Meta:
unique_together = (('VideoID', 'SplitID'),)
class SplitTranscript(models.Model):
SplitID = models.OneToOneField(Split, on_delete=models.CASCADE, primary_key=True)
Transcript = models.TextField()
def __str__(self):
return self.Transcript
class SplitSpeech(models.Model):
SplitID = models.OneToOneField(Split, on_delete=models.CASCADE, primary_key=True)
SpeechPath = models.TextField()
def __str__(self):
return str(self.SpeechPath)
class SplitSummary(models.Model):
SplitID = models.ForeignKey(Split, on_delete=models.CASCADE)
Summary = models.TextField()
def __str__(self):
return str(self.Summary)
class SplitTag(models.Model):
SplitID = models.ForeignKey(Split, on_delete=models.CASCADE)
Tag = models.TextField()
def __str__(self):
return str(self.Tag)
| 2.15625 | 2 |
testing/misc/test1.py | lagvier/echo-sense | 0 | 12790361 | <reponame>lagvier/echo-sense
import sys
from os import path
import numpy as np
ts = [1467038416442, 1467038416452, 1467038416462, 1467038416472, 1467038416482, 1467038416492, 1467038416502, 1467038416512, 1467038416522, 1467038416532]
y = [0, 0, 1, 1, 1, 1, 0, 0, 0, None]
| 1.773438 | 2 |
cs15211/KEmptySlots.py | JulyKikuAkita/PythonPrac | 1 | 12790362 | __source__ = 'https://leetcode.com/problems/k-empty-slots/'
# Time: O()
# Space: O()
#
# Description: Leetcode # 683. K Empty Slots
#
# There is a garden with N slots. In each slot, there is a flower. The N flowers will bloom one by one in N days.
# In each day, there will be exactly one flower blooming and it will be in the status of blooming since then.
#
# Given an array flowers consists of number from 1 to N.
# Each number in the array represents the place where the flower will open in that day.
#
# For example, flowers[i] = x means that the unique flower that blooms at day i will be at position x,
# where i and x will be in the range from 1 to N.
#
# Also given an integer k, you need to output in which day there exists two flowers in the status of blooming,
# and also the number of flowers between them is k and these flowers are not blooming.
#
# If there isn't such day, output -1.
#
# Example 1:
# Input:
# flowers: [1,3,2]
# k: 1
# Output: 2
# Explanation: In the second day, the first and the third flower have become blooming.
#
# Example 2:
# Input:
# flowers: [1,2,3]
# k: 1
# Output: -1
# Note:
# The given array will be in the range [1, 20000].
#
# Companies
# Google
# Related Topics
# Array
#
import unittest
class Solution(object):
pass # your function here
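    # A minimal sketch (not the author's solution) of the days[]/sliding-window idea
    # explained in the "Thought" section below: days[p] is the day the flower at
    # position p+1 blooms; scan windows of width k+2 whose interior blooms later
    # than both endpoints.
    def kEmptySlots(self, flowers, k):
        n = len(flowers)
        days = [0] * n
        for day, pos in enumerate(flowers, 1):
            days[pos - 1] = day
        ans = float('inf')
        left, right = 0, k + 1
        while right < n:
            window_ok = True
            for i in range(left + 1, right):
                if days[i] < days[left] or days[i] < days[right]:
                    # an interior flower blooms earlier; restart the window at i
                    left, right = i, i + k + 1
                    window_ok = False
                    break
            if window_ok:
                ans = min(ans, max(days[left], days[right]))
                left, right = right, right + k + 1
        return ans if ans != float('inf') else -1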
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/k-empty-slots/solution/
It seems that this question has some mistakes. I think there are two places that might lead to misunderstandings:
(please feel free to tell me if I'm incorrect)
flowers[i] = x should mean that the unique flower that blooms at day i+1 (not i) will be at position x.
If you can get multiple possible results, then you need to return the minimum one.
The idea is to use an array days[] to record each position's flower's blooming day.
That means days[i] is the blooming day of the flower in position i+1.
We just need to find a subarray days[left, left+1,..., left+k-1, right] which satisfies:
for any i = left+1,..., left+k-1, we can have days[left] < days[i] && days[right] < days[i].
Then, the result is max(days[left], days[right]).
# 112ms 10.06%
class Solution {
public int kEmptySlots(int[] flowers, int k) {
TreeSet<Integer> blooming = new TreeSet<>();
int day = 0;
for (int slot: flowers) {
day++;
blooming.add(slot);
for (Integer neighbor : new Integer[]{blooming.lower(slot), blooming.higher(slot)}){
if (neighbor != null && Math.abs(neighbor - slot) - 1 == k) return day;
}
}
return -1;
}
}
# 8ms 98%
class Solution {
public int kEmptySlots(int[] flowers, int k) {
if(k < 0 || k > flowers.length - 2) {
return -1;
}
k++;
int[] mins = new int[flowers.length / k + 3];
int[] maxs = new int[mins.length];
Arrays.fill(mins, Integer.MAX_VALUE);
Arrays.fill(maxs, Integer.MIN_VALUE);
for(int i = 0; i < flowers.length; i++) {
int flower = flowers[i];
int index = flower / k + 1;
if(flower < mins[index]) {
mins[index] = flower;
if(maxs[index - 1] + k == flower) {
return i + 1;
}
}
if(flower > maxs[index]) {
maxs[index] = flower;
if(flower + k == mins[index + 1]) {
return i + 1;
}
}
}
return -1;
}
}
''' | 4.03125 | 4 |
rainy/replay/base.py | kngwyu/Rainy | 37 | 12790363 | <filename>rainy/replay/base.py
from abc import ABC, abstractmethod
from typing import Generic, List, Type, TypeVar
ReplayFeed = TypeVar("ReplayFeed")
class ReplayBuffer(ABC, Generic[ReplayFeed]):
def __init__(self, feed: Type[ReplayFeed], allow_overlap: bool = False) -> None:
self.feed = feed
self.allow_overlap = allow_overlap
@abstractmethod
def append(self, *args) -> None:
pass
@abstractmethod
def sample(self, batch_size: int) -> List[ReplayFeed]:
pass
@abstractmethod
def __len__(self):
pass
| 2.703125 | 3 |
app/wda.py | shucheng-ai/WDA-web-server | 0 | 12790364 | #!/usr/bin/env python3
# coding:utf-8
"""
配置 wda-auth && wda-cloud
"""
import datetime
from config import DEPLOY, AUTH_DB_HOST, AUTH_DB_PORT, AUTH_DB_USERNAME, AUTH_DB_PASSWORD, DB_HOST, DB_PORT, \
DB_USERNAME, DB_PASSWORD
if DEPLOY == 1:
from wda_decorators.wda import WDA as WDA_AUTH
from wda_model.wda_model.wda import WDA as WDA_MODEL
wdaauth = WDA_AUTH(
db_host=AUTH_DB_HOST,
db_port=AUTH_DB_PORT,
db_username=AUTH_DB_USERNAME,
db_password=<PASSWORD>,
)
wdamodel = WDA_MODEL(
db_host=DB_HOST,
db_port=DB_PORT,
db_username=DB_USERNAME,
db_password=<PASSWORD>,
)
def get_projcet_bymodel(project_id):
return wdamodel.project.get(id=project_id, engine=wdamodel.db_engine)
def rename_bymodel(project_id, project_name):
update_props = {
'name': project_name,
'update_date': datetime.datetime.utcnow(),
}
wdamodel.project.update(
engine=wdamodel.db_engine,
id=project_id,
data=update_props
)
wdamodel.layout2_project.update(
engine=wdamodel.db_engine,
id=project_id,
data=update_props
)
def create_project_bymodel(project):
new_project = wdamodel.layout2_project(
id=project.id,
uid=project.uid,
company=project.company,
name=project.name,
username=project.username,
type='',
)
wdamodel.layout2_project.add(new_project, wdamodel.db_engine)
else:
class DEMO(object):
def wda_auth(self, func):
def wda_wrapper(*args, **kwargs):
kwargs["wda_user"] = {
"name": "test",
"uid": -1
}
return func(*args, **kwargs)
wda_wrapper.__name__ = func.__name__
return wda_wrapper
def get_projcet_bymodel(project_id):
return None
def rename_bymodel(project_id, project_name):
return None
def create_project_bymodel(project):
return None
wdaauth = DEMO()
wdamodel = DEMO()
| 2.171875 | 2 |
app/main/views.py | synthiakageni/NEWS-APP | 0 | 12790365 | from flask import render_template,request,redirect,url_for
from . import main
from ..request import get_news, get_news_articles, search_article
# Views
@main.route('/')
def index():
'''
function that returns the index page and its data
'''
#Get popular news
general_news = get_news('general')
sports_news = get_news('sports')
entertainment_news = get_news('entertainment')
health_news = get_news('health')
business_news = get_news('business')
tech_news = get_news('technology')
science_news = get_news('science')
title = 'Home - Welcome to The best News Website Online'
search_article = request.args.get('news_query')
if search_article:
return redirect(url_for('search',article_name=search_article))
else:
return render_template('index.html', title = title, general = general_news, sports = sports_news, entertainment = entertainment_news, health = health_news, business = business_news, technology = tech_news, science = science_news)
@main.route('/articles/<id>')
def articles(id):
'''
View article function that returns the articles in a source
'''
articles = get_news_articles(id)
return render_template('articles.html', id = id, articles = articles)
@main.route('/search/<article_name>')
def search(article_name):
'''
View function to display the search results
'''
article_name_list = article_name.split(" ")
article_name_format = "+".join(article_name_list)
searched_articles = search_article(article_name_format)
title = f'search results for {article_name}'
return render_template('search.html',article = searched_articles)
| 2.96875 | 3 |
bicycleparameters/tests/test_models.py | JRMVV/BicycleParameters | 20 | 12790366 | <reponame>JRMVV/BicycleParameters<filename>bicycleparameters/tests/test_models.py
import numpy as np
from nose.tools import assert_raises
from ..parameter_sets import Meijaard2007ParameterSet
from ..models import Meijaard2007Model
meijaard2007_parameters = { # dictionary of the parameters in Meijaard 2007
'IBxx': 9.2,
'IBxz': 2.4,
'IByy': 11.0,
'IBzz': 2.8,
'IFxx': 0.1405,
'IFyy': 0.28,
'IHxx': 0.05892,
'IHxz': -0.00756,
'IHyy': 0.06,
'IHzz': 0.00708,
'IRxx': 0.0603,
'IRyy': 0.12,
'c': 0.08,
'g': 9.81,
'lam': np.pi/10.0,
'mB': 85.0,
'mF': 3.0,
'mH': 4.0,
'mR': 2.0,
'rF': 0.35,
'rR': 0.3,
'v': 5.0,
'w': 1.02,
'xB': 0.3,
'xH': 0.9,
'zB': -0.9,
'zH': -0.7,
}
def test_Meijaard2007Model(show=True):
parameter_set = Meijaard2007ParameterSet(meijaard2007_parameters, True)
model = Meijaard2007Model(parameter_set)
M, C1, K0, K2 = model.form_reduced_canonical_matrices()
assert M.shape == (2, 2)
assert C1.shape == (2, 2)
assert K0.shape == (2, 2)
assert K2.shape == (2, 2)
M, C1, K0, K2 = model.form_reduced_canonical_matrices(
w=np.linspace(0.5, 1.5, num=5))
assert M.shape == (5, 2, 2)
assert C1.shape == (5, 2, 2)
assert K0.shape == (5, 2, 2)
assert K2.shape == (5, 2, 2)
# only one parameter sweep is allowed at a time
with assert_raises(ValueError):
model.form_reduced_canonical_matrices(w=np.linspace(0.5, 1.5),
v=np.linspace(1, 3))
A, B = model.form_state_space_matrices()
assert A.shape == (4, 4)
assert B.shape == (4, 2)
A, B = model.form_state_space_matrices(w=np.linspace(0.5, 1.5, num=5))
assert A.shape == (5, 4, 4)
assert B.shape == (5, 4, 2)
A, B = model.form_state_space_matrices(v=np.linspace(0, 10, num=10))
assert A.shape == (10, 4, 4)
assert B.shape == (10, 4, 2)
evals, evecs = model.calc_eigen()
assert evals.shape == (4,)
assert evecs.shape == (4, 4)
evals, evecs = model.calc_eigen(g=6.0)
assert evals.shape == (4,)
assert evecs.shape == (4, 4)
evals, evecs = model.calc_eigen(v=np.linspace(0, 10, num=10))
assert evals.shape == (10, 4)
assert evecs.shape == (10, 4, 4)
model.plot_eigenvalue_parts(v=np.linspace(0, 10, num=10))
if show:
import matplotlib.pyplot as plt
plt.show()
| 2 | 2 |
python-algorithm/leetcode/problem_963.py | isudox/nerd-algorithm | 5 | 12790367 | <filename>python-algorithm/leetcode/problem_963.py<gh_stars>1-10
"""963. Minimum Area Rectangle II
https://leetcode.com/problems/minimum-area-rectangle-ii/
Given a set of points in the xy-plane, determine the minimum area of any
rectangle formed from these points, with sides not necessarily parallel
to the x and y axes.
If there isn't any rectangle, return 0.
Note:
1 <= points.length <= 50
0 <= points[i][0] <= 40000
0 <= points[i][1] <= 40000
All points are distinct.
Answers within 10^-5 of the actual value will be accepted as correct.
"""
from typing import List
class Solution:
def min_area_free_rect(self, points: 'List[List[int]]') -> 'float':
def distance(p1, p2):
return (p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2
def is_rect(p1, p2, p3, p4):
center = [(p1[0] + p2[0] + p3[0] + p4[0]) / 4,
(p1[1] + p2[1] + p3[1] + p4[1]) / 4]
dist_1 = distance(p1, center)
dist_2 = distance(p2, center)
dist_3 = distance(p3, center)
dist_4 = distance(p4, center)
return dist_1 == dist_2 and dist_2 == dist_3 and dist_3 == dist_4
def triangle_area(p1, p2, p3):
return abs(
p1[0] * p2[1] + p2[0] * p3[1] + p3[0] * p1[1] - p1[0] * p3[1] -
p2[0] * p1[1] - p3[0] * p2[1]) / 2
def rect_area(p1, p2, p3, p4):
return triangle_area(p1, p2, p3) + triangle_area(p2, p3, p4)
ans = float('inf')
length = len(points)
if length < 4:
return 0.00000
i = 0
while i < length - 3:
j = i + 1
while j < length - 2:
k = j + 1
while k < length - 1:
l = k + 1
while l < length:
if is_rect(points[i], points[j], points[k], points[l]):
cur_area = rect_area(points[i], points[j],
points[k], points[l])
ans = min(ans, cur_area)
l += 1
k += 1
j += 1
i += 1
return ans if ans < float('inf') else 0.00000
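# Quick check against the published example (LeetCode 963, Example 1):
#   Solution().min_area_free_rect([[1, 2], [2, 1], [1, 0], [0, 1]])  # -> 2.0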
| 3.671875 | 4 |
my_site/migrations/0013_auto_20181018_1820.py | sch841466053/web | 0 | 12790368 | <reponame>sch841466053/web
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-10-18 10:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('my_site', '0012_comments_time'),
]
operations = [
migrations.AlterField(
model_name='comments',
name='time',
field=models.CharField(max_length=32, null=True),
),
]
| 1.453125 | 1 |
django_migration_utils/rename_table.py | mcldev/django-migration-utils | 0 | 12790369 |
def fwd_rename_app(apps, schema_editor, apps_to_rename):
for old_appname, new_appname in apps_to_rename:
# Renaming model from 'Foo' to 'Bar'
schema_editor.execute("UPDATE django_migrations SET app_name = %s WHERE app_name = %s", [new_appname, old_appname])
schema_editor.execute("UPDATE django_content_type SET app_label = %s WHERE app_label = %s", [new_appname, old_appname])
new_app = apps.get_app_config(new_appname)
app_models = new_app.get_models(include_auto_created=True)
for model in app_models:
if model._meta.proxy == True:
continue
new_table_name = model._meta.db_table
old_table_name = old_appname + new_table_name[len(new_appname):]
schema_editor.alter_db_table(old_table_name, new_table_name)
def back_rename_app(apps, schema_editor, apps_to_rename):
for old_appname, new_appname in apps_to_rename:
# Renaming model back from 'Bar' to 'Foo'
schema_editor.execute("UPDATE django_migrations SET app_name = %s WHERE app_name = %s", [old_appname, new_appname])
schema_editor.execute("UPDATE django_content_type SET app_label = %s WHERE app_label = %s", [old_appname, new_appname])
new_app = apps.get_app_config(new_appname)
app_models = new_app.get_models(include_auto_created=True)
for model in app_models:
if model._meta.proxy == True:
continue
old_table_name = model._meta.db_table
new_table_name = old_appname + old_table_name[len(new_appname):]
schema_editor.alter_db_table(old_table_name, new_table_name)
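# Minimal usage sketch (app names are placeholders/assumptions): bind the rename list
# with functools.partial and hand both helpers to RunPython inside a migration file.
#
#   from functools import partial
#   from django.db import migrations
#
#   APPS_TO_RENAME = [('old_app', 'new_app')]
#
#   class Migration(migrations.Migration):
#       dependencies = []
#       operations = [
#           migrations.RunPython(
#               partial(fwd_rename_app, apps_to_rename=APPS_TO_RENAME),
#               partial(back_rename_app, apps_to_rename=APPS_TO_RENAME),
#           ),
#       ]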
| 2.34375 | 2 |
hostlists/plugins/plugintype.py | williamjoy/hostlists | 13 | 12790370 | #!/usr/bin/env python
"""
hostlists plugin to recursively query plugins based on type.
This makes it possible to obtain lists of hosts by recursively
querying multiple backends.
For example:
* Query dns for www.foo.com
* Get a list of two hostnames back haproxy1.ny.foo.com and
haproxy1.lax.foo.com.
* Query reverse proxies and load balancers for the
above two hostnames and the names of any hosts serving
the traffic for them. haproxy1.ny.foo.com is a vip being
served by apache1.ny.foo.com ad apache2.ny.foo.com.
haproxy1.lax.foo.com is a vip being serviced by
apache2.lax.foo.com, apache3.lax.foo.com and
joesdesktop.foo.com.
* Return apache[1-2].ny.foo.com, apache[2-3].lax.foo.com,
joesdektop.foo.com
"""
# Copyright (c) 2010-2015 Yahoo! Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License. See accompanying LICENSE file.
from hostlists.plugin_manager import get_plugins
def name():
return ['type', 'type_vip', 'type_vip_up', 'type_vip_down']
def expand(value, name=None):
""" Try all plugins of a specific type for a result, if none
are able to expand the value further then return just the value """
mod_type = 'vip'
if not name:
return [value]
if name.lower() in ['type_vip']:
mod_type = 'vip'
filter_append = ''
if name.lower() in ['type_vip_down']:
mod_type = 'vip_down'
filter_append = '_down'
if name.lower() in ['type_vip_up']:
mod_type = 'vip_up'
filter_append = '_up'
plugins = get_plugins()
for plugin_name in plugins.keys():
if (
(filter_append != '' and plugin_name.endswith(filter_append)) or (filter_append == '' and plugin_name.find('_') == -1)
):
try:
if mod_type in plugins[plugin_name].type():
name = plugin_name + filter_append
result = plugins[plugin_name].expand(value, name=name)
if len(result):
return result
except AttributeError:
pass
return [value]
| 3.234375 | 3 |
test/test_instructions/test_stack_instructions.py | ronyhe/pyjvm | 15 | 12790371 | <gh_stars>10-100
from pyjvm.core.actions import Pop, DuplicateTop, Push
from pyjvm.core.jvm_types import Integer, Long
from test.utils import assert_incrementing_instruction, SOME_INT
def _create_integers(amount):
for i in range(amount):
yield Integer.create_instance(i)
def _translate_stack_string(text):
def elem(e):
if e == '1':
type_ = Integer
elif e == '2':
type_ = Long
else:
raise ValueError()
return type_.create_instance(4)
return [elem(c) for c in text]
def _duplication_test(instruction, stack_string, amount_to_take, index_for_insertion):
assert_incrementing_instruction(
instruction=instruction,
op_stack=_translate_stack_string(stack_string),
expected=[
DuplicateTop(amount_to_take=amount_to_take, index_for_insertion=index_for_insertion)
]
)
def _duplication_tests(specs):
for spec in specs:
_duplication_test(*spec)
def test_duplications():
_duplication_tests((
('dup', '1', 1, 1),
('dup_x1', '11', 1, 2),
('dup_x2', '111', 1, 3),
('dup_x2', '12', 1, 2),
('dup2', '11', 2, 2),
('dup2', '2', 1, 1),
('dup2_x1', '111', 2, 3),
('dup2_x1', '21', 1, 2),
('dup2_x2', '1111', 2, 4),
('dup2_x2', '211', 1, 3),
('dup2_x2', '112', 2, 3),
('dup2_x2', '22', 1, 2)
))
def test_pop():
assert_incrementing_instruction(
instruction='pop',
op_stack=[SOME_INT],
expected=[Pop()]
)
def test_swap():
first, second = _create_integers(2)
assert_incrementing_instruction(
instruction='swap',
op_stack=[first, second],
expected=[
Pop(2),
Push(first),
Push(second)
]
)
| 2.4375 | 2 |
IreneUtility/util/u_biasgame.py | MujyKun/IreneUtility | 1 | 12790372 | import asyncio
from PIL import Image
from ..Base import Base
class BiasGame(Base):
def __init__(self, *args):
super().__init__(*args)
async def create_bias_game_image(self, first_idol_id, second_idol_id):
"""Uses thread pool to create bias game image to prevent IO blocking."""
# (self.ex.thread_pool.submit(self.merge_images, first_idol_id, second_idol_id)).result()
await self.ex.run_blocking_code(self.merge_images, first_idol_id, second_idol_id)
return f"{ self.ex.keys.bias_game_location}{first_idol_id}_{second_idol_id}.png"
def merge_images(self, first_idol_id, second_idol_id):
"""Merge Idol Images if the merge doesn't exist already."""
file_name = f"{first_idol_id}_{second_idol_id}.png"
if not self.ex.check_file_exists(f"{self.ex.keys.bias_game_location}{file_name}"):
# open the images.
with Image.open(f'{self.ex.keys.bias_game_location}versus.png') as versus_image, \
Image.open(f'{self.ex.keys.idol_avatar_location}{first_idol_id}_IDOL.png') as first_idol_image, \
Image.open(f'{self.ex.keys.idol_avatar_location}{second_idol_id}_IDOL.png') as second_idol_image:
# define the dimensions
idol_image_width = 150
idol_image_height = 150
first_image_area = (0, 0)
second_image_area = (versus_image.width - idol_image_width, 0)
image_size = (idol_image_width, idol_image_height)
# resize the idol images
first_idol_image = first_idol_image.resize(image_size)
second_idol_image = second_idol_image.resize(image_size)
# add the idol images onto the VS image.
versus_image.paste(first_idol_image, first_image_area)
versus_image.paste(second_idol_image, second_image_area)
# save the versus image.
versus_image.save(f"{self.ex.keys.bias_game_location}{file_name}")
async def create_bias_game_bracket(self, all_games, user_id, bracket_winner):
# (self.ex.thread_pool.submit(self.create_bracket, all_games, user_id, bracket_winner)).result()
await self.ex.run_blocking_code(self.create_bracket, all_games, user_id, bracket_winner)
return f"{self.ex.keys.bias_game_location}{user_id}.png"
def create_bracket(self, all_games, user_id, bracket_winner):
def resize_images(first_img, second_img, first_img_size, second_img_size):
return first_img.resize(first_img_size), second_img.resize(second_img_size)
def paste_image(first_idol_img, second_idol_img, first_img_area, second_img_area):
bracket.paste(first_idol_img, first_img_area)
bracket.paste(second_idol_img, second_img_area)
with Image.open(f'{self.ex.keys.bias_game_location}bracket8.png') as bracket:
count = 1
for c_round in all_games:
if len(c_round) > 4:
continue
for first_idol, second_idol in c_round:
first_idol_info = self.ex.cache.stored_bracket_positions.get(count)
second_idol_info = self.ex.cache.stored_bracket_positions.get(count + 1)
with Image.open(f'{self.ex.keys.idol_avatar_location}{first_idol.id}_IDOL.png') as first_idol_image, \
Image.open(f'{self.ex.keys.idol_avatar_location}{second_idol.id}_IDOL.png') as second_idol_image:
# resize images
first_idol_image, second_idol_image = resize_images(first_idol_image, second_idol_image,
first_idol_info.get('img_size'),
second_idol_info.get('img_size'))
# paste image to bracket
paste_image(first_idol_image, second_idol_image, first_idol_info.get('pos'),
second_idol_info.get('pos'))
count = count + 2
# add winner
idol_info = self.ex.cache.stored_bracket_positions.get(count)
with Image.open(f'{self.ex.keys.idol_avatar_location}{bracket_winner.id}_IDOL.png') as idol_image:
idol_image = idol_image.resize(idol_info.get('img_size'))
bracket.paste(idol_image, idol_info.get('pos'))
bracket.save(f"{self.ex.keys.bias_game_location}{user_id}.png")
| 2.984375 | 3 |
make-csv.py | mlizbeth/AdobeCSV | 0 | 12790373 |
import tabula
import pandas
import re
df = tabula.read_pdf('1.pdf', encoding='utf-8', pages='1-2')
temp = pandas.concat(df)
temp.to_csv('output.csv', encoding='utf-8', index=False)
| 2.734375 | 3 |
early_projects/test_prefixes.py | JSBCCA/pythoncode | 0 | 12790374 |
import unittest
import prefixes
class TestPrefixes(unittest.TestCase):
"""Tests prefixes."""
def test_above_freezing_above(self):
"""Test a temperature that is above freezing."""
expected = True
        actual = prefixes.above_freezing(5.2)
self.assertEqual(expected, actual,
"The temperature is above freezing.")
unittest.main()
| 3.34375 | 3 |
dbx_api_primer/app.py | posita/dropbox-api-primer | 0 | 12790375 | # -*-mode: python; encoding: utf-8; test-case-name: tests.test_app-*-
# ========================================================================
"""
Copyright |(c)| 2017 `Dropbox, Inc.`_
.. |(c)| unicode:: u+a9
.. _`Dropbox, Inc.`: https://www.dropbox.com/
Please see the accompanying ``LICENSE`` and ``CREDITS`` file(s) for
rights and restrictions governing use of this software. All rights not
expressly waived or licensed are reserved. If such a file did not
accompany this software, then please contact the author before viewing
or using this software in any capacity.
"""
# ========================================================================
from __future__ import (
absolute_import, division, print_function, unicode_literals,
)
from builtins import * # noqa: F401,F403; pylint: disable=redefined-builtin,unused-wildcard-import,useless-suppression,wildcard-import
from future.builtins.disabled import * # noqa: F401,F403; pylint: disable=redefined-builtin,unused-wildcard-import,useless-suppression,wildcard-import
# ---- Imports -----------------------------------------------------------
from future.moves.urllib.parse import urljoin
from future.utils import bytes_to_native_str
import hashlib
import hmac
import os
import sqlite3
import dropbox
import flask
# Used for the tutorial
import datetime # noqa: F401; pylint: disable=unused-import
import humanize # noqa: F401; pylint: disable=unused-import
# ---- Constants ---------------------------------------------------------
__all__ = ()
_SESSION_USER_ID = 'user-id'
_SESSION_DBX_AUTH_STATE = 'dbx-auth-state'
_APP = flask.Flask(__name__)
# ---- Functions ---------------------------------------------------------
# ========================================================================
@_APP.route('/', methods=( 'GET', 'POST' ))
def route_():
db = get_db()
user_id = flask.session.get(_SESSION_USER_ID)
user_dbx_acct_entry = None
if user_id is not None:
user_dbx_acct_entry = db_user_dbx_acct_select_one_by_user_id(db, user_id)
if user_dbx_acct_entry is None:
# They have a stale user ID, but we don't know why, so just
# treat them as a new browser
user_id = None
flask.session.pop(_SESSION_USER_ID, None)
if flask.request.method == 'GET':
# This displays the main page, which changes based on whether
# the session contains a valid user ID
template_vars = {
'title': _APP.config['SITE_TITLE'],
}
if user_dbx_acct_entry is not None:
user_name = user_dbx_acct_entry[bytes_to_native_str(b'user_name')]
user_email = user_dbx_acct_entry[bytes_to_native_str(b'user_email')]
template_vars['user_name'] = user_name
if user_email is not None:
template_vars['user_email'] = user_email
# TODO: Maybe we should do something fun here?
return flask.render_template('settings.html', **template_vars)
elif flask.request.method == 'POST':
action = flask.request.form.get('action')
if action == 'enable':
# Start the auth flow
return flask.redirect(flask.url_for('route_start'))
elif action == 'disable':
# We need to try to revoke all the tokens we have and clear
# this session. See WARNING comment in ``route_finish``.
if user_dbx_acct_entry is not None:
dbx_acct_id = user_dbx_acct_entry[bytes_to_native_str(b'dbx_acct_id')]
db_user_update_for_delete_by_dbx_acct_id(db, dbx_acct_id)
for user_entry in db_user_select_all_deleted_by_dbx_acct_id(db, dbx_acct_id):
dbx_auth_token = user_entry[bytes_to_native_str(b'dbx_auth_token')]
dbx = dropbox.Dropbox(dbx_auth_token)
try:
dbx.auth_token_revoke()
except dropbox.exceptions.AuthError:
# Token is already revoked
_APP.logger.info('token "%s" already revoked', dbx_auth_token)
user_id = user_entry[bytes_to_native_str(b'user_id')]
db_user_delete(db, user_id)
db.commit()
flask.session.pop(_SESSION_USER_ID, None)
return flask.redirect(flask.url_for('route_'))
else:
flask.abort(400) # bad request
# ========================================================================
@_APP.route('/finish')
def route_finish():
# This is basically modified from the example code at
# <http://dropbox-sdk-python.readthedocs.io/en/master/moduledoc.html#dropbox.oauth.DropboxOAuth2Flow>
auth_flow = _new_dbx_auth_flow(flask.session)
try:
auth_res = auth_flow.finish(flask.request.args)
except dropbox.oauth.BadRequestException:
flask.abort(400)
except dropbox.oauth.BadStateException:
# Start the auth flow again
return flask.redirect(flask.url_for('route_start'))
except dropbox.oauth.CsrfException:
flask.abort(403)
except dropbox.oauth.NotApprovedException:
flask.abort(401)
except dropbox.oauth.ProviderException as exc:
_APP.logger.info('auth error: %s', exc)
flask.abort(403)
# Compare our saved random state with what comes back from Dropbox
dbx_auth_state = flask.session.pop(_SESSION_DBX_AUTH_STATE, None)
if dbx_auth_state is None \
or auth_res.url_state != dbx_auth_state:
_APP.logger.info('browser state (%s) does not equal returned state (%s)', dbx_auth_state, auth_res.url_state)
flask.abort(403)
# Brilliant! Now we can DO stuff!
dbx_auth_token = auth_res.access_token
dbx_acct_id = auth_res.account_id
# TODO: Maybe now that we have an auth token, we can retrieve the
# user's Dropbox account name and e-mail using the API?
user_name = '<USE THE API TO RETRIEVE ME!>'
user_email = None
# Fake a secure-ish user ID and save the new user. See warning below.
user_id_seed = bytes(dbx_acct_id, encoding='utf-8') + os.urandom(24)
user_id = hashlib.sha256(user_id_seed).hexdigest()
db = get_db()
try:
db_dbx_acct_insert(db, dbx_acct_id, user_name, user_email)
except sqlite3.IntegrityError:
# The user's account record is already there, so we update the
# name and e-mail to the latest
db_dbx_acct_update(db, dbx_acct_id, user_name, user_email)
db_user_insert(db, user_id, dbx_acct_id, dbx_auth_token)
db.commit()
# WARNING: This is just to make our demo simpler. Don't ever use Flask
# sessions this way if your want to be #WorthyOfTrust. See
# <https://blog.miguelgrinberg.com/post/how-secure-is-the-flask-user-session>.
#
# Further, even if this WERE secure (which it isn't), this effectively
# treats Dropbox as an identity provider, which we shouldn't do. From
# <https://www.dropbox.com/developers/documentation/http/documentation#authorization>:
#
# Note: OAuth is an authorization protocol, not an authentication
# protocol. Dropbox should not be used as an identity provider.
#
# What we should be doing instead is having logins of our own that
# refer to at most one auth token. Ye have been warned.
flask.session[_SESSION_USER_ID] = user_id
return flask.redirect(flask.url_for('route_'))
# ========================================================================
@_APP.route('/start')
def route_start():
# This is basically modified from the example code at
# <http://dropbox-sdk-python.readthedocs.io/en/master/moduledoc.html#dropbox.oauth.DropboxOAuth2Flow>
dbx_auth_state = hashlib.sha256(os.urandom(24)).hexdigest()
# Save our random state in the browser so we can compare it with what
# comes back from Dropbox later
flask.session[_SESSION_DBX_AUTH_STATE] = dbx_auth_state
auth_url = _new_dbx_auth_flow(flask.session).start(dbx_auth_state)
return flask.redirect(auth_url)
# ========================================================================
@_APP.route('/webhook', methods=( 'GET', 'POST' ))
def route_webhook():
if flask.request.method == 'GET':
return flask.request.args.get('challenge', '')
elif flask.request.method == 'POST':
# Make sure we have a valid request. See
# <https://www.dropbox.com/developers/reference/webhooks#notifications>.
signature = flask.request.headers.get('X-Dropbox-Signature')
        expected = hmac.new(_APP.config['DBX_APP_SECRET'].encode('utf-8'), flask.request.data, hashlib.sha256).hexdigest()
if not hmac.compare_digest(signature, expected):
flask.abort(403)
# This is just to make our demo simpler. We shouldn't normally do
# any processing here. From
# <https://www.dropbox.com/developers/reference/webhooks#best-practices>:
#
# Your app only has ten seconds to respond to webhook requests.
# ... To make sure you can always respond within ten seconds,
# you should always do your work on a separate thread ... or
# asynchronously using a queue.
        # TODO: What fun things can we do here?
        return ''
# ========================================================================
def _new_dbx_auth_flow(session):
return dropbox.DropboxOAuth2Flow(
_APP.config['DBX_APP_KEY'],
_APP.config['DBX_APP_SECRET'],
urljoin(_APP.config['BASE_URL'], flask.url_for('route_finish')),
session,
'dbx-auth-csrf-token',
)
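# Illustrative sketch (not part of the original primer): how a test client could
# compute the X-Dropbox-Signature header that route_webhook verifies above.
# Dropbox signs the raw request body with HMAC-SHA256 keyed by the app secret;
# the arguments below are hypothetical placeholders.
def _example_webhook_signature(app_secret, raw_body):
    return hmac.new(app_secret.encode('utf-8'), raw_body, hashlib.sha256).hexdigest()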
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Pretty much everything below this point is unrelated to using the
# Dropbox API
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# ========================================================================
@_APP.teardown_appcontext
def close_db(_):
if hasattr(flask.g, 'sqlite_db'):
flask.g.sqlite_db.close()
# ========================================================================
@_APP.cli.command('initdb')
def initdb_command():
init_db()
print('initialized database')
# ========================================================================
def connect_db():
rv = sqlite3.connect(_APP.config['DATABASE'])
rv.row_factory = sqlite3.Row
return rv
# ========================================================================
def db_dbx_acct_insert(db, dbx_acct_id, user_name, user_email):
db.execute(
"""
INSERT INTO dbx_accts ( dbx_acct_id, user_name, user_email )
VALUES ( ?, ?, ? )
""",
( dbx_acct_id, user_name, user_email ),
)
# ========================================================================
def db_dbx_acct_update(db, dbx_acct_id, user_name, user_email):
db.execute(
"""
UPDATE dbx_accts SET user_name = ?, user_email = ?
WHERE dbx_acct_id = ?
""",
( user_name, user_email, dbx_acct_id ),
)
# ========================================================================
def db_dbx_acct_select(db, dbx_acct_id):
cur = db.execute(
"""
SELECT dbx_acct_id, user_name, user_email
FROM dbx_accts
WHERE dbx_acct_id = ?
""",
( dbx_acct_id, ),
)
return cur.fetchone()
# ========================================================================
def db_user_dbx_acct_select_one_by_user_id(db, user_id):
cur = db.execute(
"""
SELECT
u.user_id AS user_id,
u.dbx_acct_id AS dbx_acct_id,
u.dbx_auth_token AS dbx_auth_token,
da.user_name AS user_name,
da.user_email AS user_email
FROM users AS u
JOIN dbx_accts AS da
ON da.dbx_acct_id = u.dbx_acct_id
WHERE u.user_id = ?
""",
( user_id, ),
)
return cur.fetchone()
# ========================================================================
def db_user_delete(db, user_id):
db.execute(
"""
DELETE FROM users
WHERE user_id = ?
""",
( user_id, ),
)
# ========================================================================
def db_user_insert(db, user_id, dbx_acct_id, dbx_auth_token=None):
db.execute(
"""
INSERT INTO users ( user_id, dbx_acct_id, dbx_auth_token )
VALUES ( ?, ?, ? )
""",
( user_id, dbx_acct_id, dbx_auth_token ),
)
# ========================================================================
def db_user_select_all_deleted_by_dbx_acct_id(db, dbx_acct_id):
cur = db.execute(
"""
SELECT user_id, dbx_acct_id, dbx_auth_token
FROM users
WHERE dbx_acct_id = ?
AND user_id LIKE '%-deleted'
""",
( dbx_acct_id, ),
)
return cur.fetchall()
# ========================================================================
def db_user_update_for_delete_by_dbx_acct_id(db, dbx_acct_id):
db.execute(
"""
UPDATE users
SET user_id = user_id || '-deleted'
WHERE dbx_acct_id = ?
""",
( dbx_acct_id, ),
)
# ========================================================================
def get_db():
if not hasattr(flask.g, 'sqlite_db'):
flask.g.sqlite_db = connect_db()
return flask.g.sqlite_db
# ========================================================================
def init_db():
db = get_db()
with _APP.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
# ---- Initialization ----------------------------------------------------
_APP.config.from_object(__name__)
_APP.config.update(dict(
DATABASE=os.path.join(_APP.root_path, 'primer.db'),
SECRET_KEY=bytes_to_native_str(b'__SET_ME__'),
DBX_APP_KEY=bytes_to_native_str(b'__SET_ME__'),
DBX_APP_SECRET=bytes_to_native_str(b'__SET_ME__'),
SITE_TITLE=bytes_to_native_str(b'__SET_ME__'),
    BASE_URL=bytes_to_native_str(b'http://localhost:5000/'),
))
_APP.config.from_envvar('DBX_API_PRIMER_SETTINGS', silent=True)
if _APP.config['SECRET_KEY'] == bytes_to_native_str(b'__SET_ME__'):
_APP.logger.critical('SECRET_KEY must be set')
| 1.6875 | 2 |
linter.py | CudaText-addons/cuda_lint_jslint | 0 | 12790376 |
# Written by <NAME>
# Copyright (c) 2013 <NAME>
# License: MIT
# Change for CudaLint: <NAME>.
from cuda_lint import Linter, util
class JSL(Linter):
"""Provides an interface to the jsl executable."""
syntax = 'JavaScript'
cmd = 'jsl -stdin -nologo -nosummary'
version_args = ''
version_re = r'^JavaScript Lint (?P<version>\d+\.\d+\.\d+)'
version_requirement = '>= 0.3.0, < 0.4.0'
regex = r'''(?xi)
# First line is (lineno): type: error message
^\((?P<line>\d+)\):.*?(?:(?P<warning>warning)|(?P<error>error)):\s*(?P<message>.+)$\r?\n
# Second line is the line of code
^.*$\r?\n
# Third line is a caret pointing to the position of the error
^(?P<col>[^\^]*)\^
'''
multiline = True
error_stream = util.STREAM_STDOUT
defaults = {
'-conf:': None
}
selectors = {
'html': 'source.js.embedded.html'
}
| 2.4375 | 2 |
jurassic-journalists/classes.py | HypoT/code-jam-6 | 76 | 12790377 | from PIL import ImageFont
class Letter:
""" letter class- each letter is one of these objects, and is rendered in order. """
def __init__(self,char,size,font,color = (255,255,255,255),b=False,i=False,u=False):
"""
char: character.
size: size of letter.
        font: path to a TrueType font file, loaded via ImageFont.truetype. TODO: add handling for other font types.
        color: color of letter, RGBA tuple, each channel in range 0-255.
b: Bold flag.
i: Italics flag.
u: Underlined flag.
"""
self.char = char
self.size = size
self.font = ImageFont.truetype(font, size)
self.color = color
self.b = b
self.i = i
self.u = u
def get_kerning(self):
""" gets dimensions as tuple(w,h) that it will be when rendered. """
return self.font.getsize(self.char)
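# Illustrative usage sketch (not part of the original module). 'arial.ttf' is a
# hypothetical font path; substitute any TrueType font file available locally.
def _example_letter_usage(font_path='arial.ttf'):
    letter = Letter('A', 24, font_path, color=(255, 0, 0, 255), b=True)
    return letter.get_kerning()  # (width, height) in pixels for this glyph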
| 3.625 | 4 |
src/kgextractiontoolbox/entitylinking/tagging/dictagger.py | torgbuiedunyenyo/KGExtractionToolbox | 0 | 12790378 |
from collections import defaultdict
import itertools as it
import os
import pickle
import re
from abc import ABCMeta, abstractmethod
from datetime import datetime
from typing import List
from kgextractiontoolbox.config import TMP_DIR, DICT_TAGGER_BLACKLIST
from kgextractiontoolbox.document.document import TaggedDocument, TaggedEntity
from kgextractiontoolbox.entitylinking.tagging.base import BaseTagger
from kgextractiontoolbox.entitylinking.utils import get_document_id, DocumentError
from kgextractiontoolbox.progress import print_progress_with_eta
class DictIndex:
def __init__(self, source_file, tagger_version):
self.source_file, self.tagger_version = source_file, tagger_version
self.desc_by_term = {}
def get_n_tuples(in_list, n):
if n == 0:
return []
for i, element in enumerate(in_list):
if i + n <= len(in_list):
yield in_list[i:i + n]
else:
break
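# Illustrative sketch (not part of the original module): get_n_tuples yields every
# contiguous window of length n; the tagger scans these windows as candidate terms.
def _demo_get_n_tuples():
    windows = list(get_n_tuples(['acute', 'renal', 'failure'], 2))
    # -> [['acute', 'renal'], ['renal', 'failure']]
    return windows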
def clean_vocab_word_by_split_rules(word: str) -> str:
if word and re.match(r"[^\w]", word[0]):
word = word[1:]
if word and re.match(r"[^\w]", word[-1]):
word = word[:-1]
return word
def split_indexed_words(content):
words = content.split(' ')
ind_words = []
next_index_word = 0
for word in words:
ind = next_index_word
word_offset = 0
if word and re.match(r"[^\w]", word[0]):
word = word[1:]
ind += 1
word_offset += 1
if word and re.match(r"[^\w]", word[-1]):
word = word[:-1]
word_offset += 1
ind_words.append((word, ind))
# index = last index + length of last word incl. offset
next_index_word = next_index_word + len(word) + word_offset + 1
# For cases like "water-induced" add "water"
amendment = []
for word, index in ind_words:
split = word.split("-")
if len(split) == 2 and split[1][-2:] in {"ed", "et"}:
amendment.append((split[0], index))
ind_words += amendment
return ind_words
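# Illustrative sketch (not part of the original module): split_indexed_words pairs
# each cleaned word with its character offset in the original text and adds the
# stem of hyphenated tokens ending in "ed"/"et" (e.g. "water-induced" -> "water").
def _demo_split_indexed_words():
    pairs = split_indexed_words('water-induced effects.')
    # -> [('water-induced', 0), ('effects', 14), ('water', 0)]; trailing '.' is stripped
    return pairs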
class DictTagger(BaseTagger, metaclass=ABCMeta):
PROGRESS_BATCH = 10000
def __init__(self, short_name, long_name, version, tag_types, index_cache, source_file, *args, **kwargs):
super().__init__(*args, **kwargs)
self.tag_types = [tag_types, ]
self.short_name, self.long_name, self.version = short_name, long_name, version
self.index_cache = index_cache
self.source_file = source_file
self.desc_by_term = {}
self.log_file = os.path.join(self.log_dir, f"{short_name}.log") if self.log_dir else None
self.out_dir = os.path.join(self.root_dir, f"{short_name}_out") if self.root_dir else None
self.in_dir = os.path.join(self.root_dir, f"{short_name}_in") if self.root_dir else None
def get_types(self):
return self.tag_types
def _index_from_pickle(self):
if os.path.isfile(self.index_cache):
with open(self.index_cache, 'rb') as f:
index = pickle.load(f)
if not isinstance(index, DictIndex):
self.logger.warning('Ignore index: expect index file to contain an DosageFormTaggerIndexObject: {}'
.format(self.index_cache))
return None
if index.tagger_version != self.version:
self.logger.warning('Ignore index: index does not match tagger version ({} index vs. {} tagger)'
.format(index.tagger_version, self.version))
return None
if index.source_file != self.source_file:
self.logger.warning('Ignore index: index created with another source file ({} index vs. {} tagger)'
.format(index.source_file, self.source_file))
return None
self.logger.debug('Use precached index from {}'.format(self.index_cache))
self.desc_by_term = index.desc_by_term
return index
pass
def _index_to_pickle(self):
index = DictIndex(self.source_file, self.version)
index.desc_by_term = self.desc_by_term
if not os.path.isdir(TMP_DIR):
os.mkdir(TMP_DIR)
self.logger.debug('Storing DosageFormTagerIndex cache to: {}'.format(self.index_cache))
pickle.dump(index, open(self.index_cache, 'wb+'))
@abstractmethod
def _index_from_source(self):
pass
@staticmethod
def get_blacklist_set():
with open(DICT_TAGGER_BLACKLIST) as f:
blacklist = f.read().splitlines()
blacklist_set = set()
for s in blacklist:
s_lower = s.lower()
blacklist_set.add(s_lower)
blacklist_set.add('{}s'.format(s_lower))
blacklist_set.add('{}e'.format(s_lower))
if s_lower.endswith('s') or s_lower.endswith('e'):
blacklist_set.add(s_lower[0:-1])
return blacklist_set
# TODO: synchronization
def prepare(self, resume=False):
if self._index_from_pickle():
self.logger.info(f'{self.long_name} initialized from cache '
f'({len(self.desc_by_term.keys())} term mappings) - ready to start')
else:
self._index_from_source()
blacklist_set = DictTagger.get_blacklist_set()
self.desc_by_term = {k: v for k, v in self.desc_by_term.items() if k.lower() not in blacklist_set}
self._index_to_pickle()
# Create output directory
if self.out_dir:
os.makedirs(self.out_dir, exist_ok=True)
def get_tags(self):
return self._get_tags(self.out_dir)
def run(self):
skipped_files = []
files_total = len(self.files)
start_time = datetime.now()
for idx, in_file in enumerate(self.files):
if in_file.endswith(".txt"):
out_file = os.path.join(self.out_dir, in_file.split("/")[-1])
try:
self._tag(in_file, out_file)
except DocumentError as e:
self.logger.debug("Error in document - will be skipped {}".format(in_file))
skipped_files.append(in_file)
self.logger.info(e)
print_progress_with_eta(f"{self.long_name} tagging", self.get_progress(), files_total, start_time,
print_every_k=self.PROGRESS_BATCH, logger=self.logger)
else:
self.logger.debug("Ignoring {}: Suffix .txt missing".format(in_file))
end_time = datetime.now()
self.logger.info("Finished in {} ({} files processed, {} files total, {} errors)".format(
end_time - start_time,
self.get_progress(),
files_total,
len(skipped_files)),
)
def tag_doc(self, in_doc: TaggedDocument) -> TaggedDocument:
"""
Generate tags for a TaggedDocument
:param in_doc: document containing title+abstract to tag. Is modified by adding tags
:return: the modified in_doc
"""
and_check_range = 5
connector_words = {"and", "or"}
abb_vocab = dict()
out_doc = in_doc
pmid, title, abstact = in_doc.id, in_doc.title, in_doc.abstract
content = title.strip() + " " + abstact.strip()
content = content.lower()
# split into indexed single words
ind_words = split_indexed_words(content)
tags = []
for spaces in range(self.config.dict_max_words):
for word_tuple in get_n_tuples(ind_words, spaces + 1):
words, indexes = zip(*word_tuple)
term = " ".join(words)
if not term:
continue
start = indexes[0]
end = indexes[-1] + len(words[-1])
if start > len(title):
start = start
hits = list(self.generate_tagged_entities(end, pmid, start, term))
tags += hits
if self.config.custom_abbreviations and hits:
match = re.match(r" \(([^\(\)]*)\).*", content[indexes[-1] + len(words[-1]):])
if match:
# strip the abbreviation
abbreviation = match.groups()[0].strip()
abb_vocab[abbreviation] = [(t.ent_type, t.ent_id) for t in hits]
if abb_vocab:
for spaces in range(self.config.dict_max_words):
for word_tuple in get_n_tuples(ind_words, spaces + 1):
hits = self.get_hits(word_tuple, abb_vocab, pmid, title)
tags += hits
if self.config.dict_check_abbreviation:
tags = DictTagger.clean_abbreviation_tags(tags, self.config.dict_min_full_tag_len)
out_doc.tags += tags
return out_doc
def get_hits(self, word_tuple, abb_vocab, pmid, title):
words, indexes = zip(*word_tuple)
term = " ".join(words)
start = indexes[0]
end = indexes[-1] + len(words[-1])
if start > len(title):
start = start - 1
hits = list(self.generate_tagged_entities(end, pmid, start, term, abb_vocab))
return hits
connector_words = {"and", "or"}
@staticmethod
def conjunction_product(token_seq, seperated=False):
"""
split token_seq at last conn_word, return product of all sub token sequences. Exclude connector words.
:param seperated: return left_tuples, right_tuples instead of left_tuples+right_tuples
"""
cwords_indexes = [n for n, (w, i) in enumerate(token_seq) if w in DictTagger.connector_words]
if not cwords_indexes: # or max(cwords_indexes) in [0, len(token_seq)-1]:
return []
left = token_seq[:max(cwords_indexes)]
right = token_seq[max(cwords_indexes):]
left = [(w, i) for w, i in left if w not in DictTagger.connector_words]
right = [(w, i) for w, i in right if w not in DictTagger.connector_words]
left_tuples = [[]] + [t for n in range(0, len(left) + 1) for t in list(get_n_tuples(left, n))]
right_tuples = [[]] + [t for n in range(0, len(right) + 1) for t in list(get_n_tuples(right, n))]
yield from [(lt, rt) for lt, rt in it.product(left_tuples, right_tuples) if lt + rt]
def _tag(self, in_file, out_file):
with open(in_file) as f:
document = f.read()
result = self.tag_doc(TaggedDocument(document))
with open(out_file, "w+") as f:
f.write(str(result))
def generate_tag_lines(self, end, pmid, start, term):
hits = self._get_term(term)
# print(f"Found {hits} for '{term}'")
if hits:
for desc in hits:
yield pmid, start, end, term, self.tag_types[0], desc
def generate_tagged_entities(self, end, pmid, start, term, tmp_vocab=None):
hits = set()
if tmp_vocab:
tmp_hit = tmp_vocab.get(term)
if tmp_hit:
hits |= {hit[1] for hit in tmp_hit}
else:
hits |= set(self._get_term(term))
# print(f"Found {hits} for '{term}'")
if hits:
for desc in hits:
yield TaggedEntity((pmid, start, end, term, self.tag_types[0], desc))
def _get_term(self, term):
hits = self.desc_by_term.get(term)
return {hit for hit in hits if len(hit) >= self.config.dict_min_full_tag_len} if hits else set()
@staticmethod
def clean_abbreviation_tags(tags: List[TaggedEntity], minimum_tag_len=5):
"""
This method removes all tags which are assumed to be an abbreviation and which do not have a long expression
within the document
e.g. Aspirin (ASA) -> ASA is allowed in the document because Aspirin is associated with the same descriptor
without aspirin ASA will further not be kept as a valid tag
:param minimum_tag_len: the minimum tag length to treat a term as a 'full' tag
:param tags: a list of tags
:return: a list of cleaned tags
"""
tags_cleaned = []
desc2tags = defaultdict(list)
for t in tags:
desc2tags[t.ent_id].append(t)
# search if a full tag is found for a descriptor
for desc, tags in desc2tags.items():
keep_desc = False
for t in tags:
if len(t.text) >= minimum_tag_len:
keep_desc = True
break
if keep_desc:
tags_cleaned.extend(tags)
return tags_cleaned
def get_progress(self):
return len([f for f in os.listdir(self.out_dir) if f.endswith(".txt")])
def get_successful_ids(self):
"""
DictTagger doesn't include content in output files, so no id can be retrieved from them if no tags found.
Also, {short_name}_in dir is deleted if finished. Because of that, the ids are looked up in the files in input_dir,
mapping is done via file name.
:return:
"""
finished_filenames = os.listdir(self.out_dir)
finished_ids = {get_document_id(os.path.join(self.input_dir, fn)) for fn in finished_filenames}
return finished_ids
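# Illustrative sketch (not part of the original module): clean_abbreviation_tags
# keeps a short tag such as "asa" only if the same entity id also occurs with a
# long-form mention (>= dict_min_full_tag_len characters). Document id, offsets
# and entity ids below are hypothetical placeholders.
def _demo_clean_abbreviation_tags():
    long_form = TaggedEntity(('doc1', 0, 7, 'aspirin', 'Drug', 'D001241'))
    abbrev = TaggedEntity(('doc1', 9, 12, 'asa', 'Drug', 'D001241'))
    orphan_abbrev = TaggedEntity(('doc1', 20, 23, 'tnf', 'Drug', 'D014409'))
    kept = DictTagger.clean_abbreviation_tags([long_form, abbrev, orphan_abbrev])
    # 'aspirin' and 'asa' survive because id 'D001241' has a full-length mention;
    # 'tnf' is dropped because its id only appears as a short abbreviation.
    return kept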
| 2.15625 | 2 |
pyctcdecode/tests/test_language_model.py | wannaphong/pyctcdecode | 0 | 12790379 |
# Copyright 2021-present Kensho Technologies, LLC.
import os
import re
import unittest
from hypothesis import given
from hypothesis import strategies as st
import kenlm
from pygtrie import CharTrie
from pyctcdecode.language_model import HotwordScorer, LanguageModel, MultiLanguageModel
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
KENLM_BINARY_PATH = os.path.join(CUR_PATH, "sample_data", "bugs_bunny_kenlm.arpa")
class TestLanguageModel(unittest.TestCase):
def test_match_ptn(self):
hotwords = ["tyrion lannister", "hodor"]
match_ptn = HotwordScorer.build_scorer(hotwords)._match_ptn # pylint: disable=W0212
matched_tokens = match_ptn.findall("i work with hodor and friends")
expected_tokens = ["hodor"]
self.assertListEqual(matched_tokens, expected_tokens)
# sub-ngramming
matched_tokens = match_ptn.findall("we can match tyrion only")
expected_tokens = ["tyrion"]
self.assertListEqual(matched_tokens, expected_tokens)
# bos/eos
matched_tokens = match_ptn.findall("hodor is friends with hodor")
expected_tokens = ["hodor", "hodor"]
self.assertListEqual(matched_tokens, expected_tokens)
# word boundary only defined by space and bos/eos
matched_tokens = match_ptn.findall("do not match hodor, or anything else here")
expected_tokens = []
self.assertListEqual(matched_tokens, expected_tokens)
# punctuation compatibility
hotwords = ["hodor,"]
match_ptn = HotwordScorer.build_scorer(hotwords)._match_ptn # pylint: disable=W0212
matched_tokens = match_ptn.findall("please match hodor, but not hodor")
expected_tokens = ["hodor,"]
self.assertListEqual(matched_tokens, expected_tokens)
def test_trie(self):
hotwords = ["<NAME>", "hodor"]
char_trie = HotwordScorer.build_scorer(hotwords)._char_trie # pylint: disable=W0212
has_token = char_trie.has_node("hod") > 0
self.assertTrue(has_token)
has_token = char_trie.has_node("dor") > 0
self.assertFalse(has_token)
# works for full tokens as well
has_token = char_trie.has_node("hodor") > 0
self.assertTrue(has_token)
# sub-ngramming
has_token = char_trie.has_node("lann") > 0
self.assertTrue(has_token)
# punctuation compatibility
hotwords = ["U.S.A."]
char_trie = HotwordScorer.build_scorer(hotwords)._char_trie # pylint: disable=W0212
has_token = char_trie.has_node("U.S") > 0
self.assertTrue(has_token)
# fuzz tests below generated with `hypothesis write language_model.py` and edited for concision.
class TestFuzzMultiLanguageModel(unittest.TestCase):
@given(
language_models=st.lists(
st.builds(
LanguageModel,
kenlm_model=st.just(kenlm.Model(KENLM_BINARY_PATH)),
alpha=st.one_of(st.just(0.5), st.floats()),
beta=st.one_of(st.just(1.5), st.floats()),
score_boundary=st.one_of(st.just(True), st.booleans()),
unigrams=st.one_of(
st.none(),
st.lists(st.text()),
),
unk_score_offset=st.one_of(st.just(-10.0), st.floats()),
),
),
)
def test_fuzz_MultiLanguageModel(self, language_models):
if len(language_models) >= 2:
MultiLanguageModel(language_models=language_models)
else:
with self.assertRaises(ValueError):
MultiLanguageModel(language_models=language_models)
class TestHotwordScorer(unittest.TestCase):
@given(match_ptn=st.just(re.compile("")), char_trie=st.builds(CharTrie), weight=st.floats())
def test_fuzz_HotwordScorer(self, match_ptn, char_trie, weight):
HotwordScorer(match_ptn=match_ptn, char_trie=char_trie, weight=weight)
@given(
unigrams=st.one_of(
st.none(),
st.lists(st.text()),
),
alpha=st.floats(),
beta=st.floats(),
unk_score_offset=st.floats(),
score_boundary=st.booleans(),
partial_token=st.text(),
)
def test_fuzz_LanguageModel(
self, unigrams, alpha, beta, unk_score_offset, score_boundary, partial_token
):
kenlm_model = kenlm.Model(KENLM_BINARY_PATH)
lm = LanguageModel(
kenlm_model=kenlm_model,
unigrams=unigrams,
alpha=alpha,
beta=beta,
unk_score_offset=unk_score_offset,
score_boundary=score_boundary,
)
lm.score_partial_token(partial_token)
| 2.40625 | 2 |
experiments/analyze_head_pose.py | TechieBoy/deepfake-detection | 0 | 12790380 |
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from sklearn import svm
from sklearn.model_selection import train_test_split
def doPCA(df, n_components=2):
array = df.values
x = array[:,:-1]
y = array[:,-1]
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=n_components)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents)
finalDf = pd.concat([principalDf, pd.DataFrame(y)], axis = 1)
return finalDf
def scatterPlot(df, sample=100):
df = df.sample(sample)
x1 = df.iloc[:, 0]
x2 = df.iloc[:, 1]
y = df.iloc[:, 2]
plt.scatter(x1, x2, c=y)
return plt
def printStats(df):
print("\nClass Distribution")
print(df.groupby( ' Fake' ).size())
print("\nFake data description")
print(df[df[" Fake"]==1].describe())
print("\nReal data description")
print(df[df[" Fake"]==0].describe())
print("\nSkew")
print(df.skew())
print("\nPearsons correlation")
corr = df.corr(method='pearson') # or spearman or kendall
print(corr)
def svmClassifier(df):
print("Data loaded: ", df.shape)
array = df.values
X = array[:,:-1]
Y = array[:,-1]
test_size = 0.33
seed = 7
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
clf = svm.SVC(kernel='linear', class_weight='balanced')
print("Training...")
clf.fit(X_train, Y_train)
result = clf.score(X_test, Y_test)
print("Accuracy:", result)
return clf
if __name__=="__main__":
names = range(7)
df_fake = pd.read_csv("~/deepfake/deepfake-detection/experiments/head_pose/feat/fake.txt", sep=" ", names=names).iloc[:, :-1]
df_real = pd.read_csv("~/deepfake/deepfake-detection/experiments/head_pose/feat/real.txt", sep=" ", names=names).iloc[:, :-1]
df_fake[" Fake"] = 1
df_real[" Fake"] = 0
df = pd.concat([df_fake, df_real], axis=0, ignore_index=True)
# df = doPCA(df)
svmClassifier(df)
# printStats(df)
# scatterPlot(df, 1000)
# plt.show() | 2.953125 | 3 |
src/0_processing_data.py | NationalLimerickProductions/seq2cite | 1 | 12790381 |
"""
Assembling a parsed dataset of citation spans from the CORD-19 data
"""
import csv
import json
import sys
import multiprocessing as mp
from collections import namedtuple
from typing import Union
import pickle
import tarfile
from io import BytesIO
from tqdm import tqdm
import pandas as pd
import numpy as np
import boto3
from seq2cite import config, utils, aws, text
# Number of tokens surrounding the citation to take as context
CONTEXT_SIZE = 30
ARTICLES_FILE = config.processed / 'cord19_articles.csv'
CITATIONS_FILE = config.processed / 'cord19_context_citations.csv'
AUTHOR_VOCAB_FILE = config.processed / 'cord19_author_vocab.json'
TOKEN_VOCAB_FILE = config.processed / 'cord19_token_vocab.json'
TITLE_VOCAB_FILE = config.processed / 'cord19_title_vocab.json'
CITE_TOKEN = '<CITE>'
CITE_IDX = 1
ARTICLES_NAMES = ['cord_uid', 'title', 'authors', 'date', 'journal', 'doi']
CITATIONS_NAMES = ['citation_id', 'context', 'auth_idxs', 'citing_auth_idxs', 'title_idxs']
MIN_DATE = pd.to_datetime('2010-01-01')
KEYS = {'arxiv': '',
'noncomm_use_subset': '2020-04-10',
'biorxiv_medrxiv': '2020-04-17',
'custom_license': '2020-04-10',
'comm_use_subset': '2020-04-10'}
author_vocab = {'<UNK>': 0, '<PAD>': 9999999}
token_vocab = {'<UNK>': 0, '<CITE>': 1, '<PAD>': 9999999}
title_vocab = {'<UNK>': 0, '<CITE>': 1, '<PAD>': 9999999}
curr_author_idx = 1
curr_token_idx = 2
curr_title_idx = 2
def load_metadata(offset=0,
chunk_size=None,
colnames=config.metadata_columns
) -> Union[pd.DataFrame, None]:
header = None if colnames is None else 0
df = pd.read_csv(config.raw / 'metadata.csv',
nrows=chunk_size,
skiprows=offset,
names=colnames,
header=header,
index_col=False)
if len(df) == 0:
return None
df = df[~pd.isna(df['sha'])]
df = df[pd.to_datetime(df['publish_time']) > MIN_DATE]
return df
def load_tar_files():
res = {}
for key, date_ in KEYS.items():
print(f'Loading {key}')
keyfile = config.raw / f'{key}.tar.gz'
try:
content = tarfile.open(keyfile, mode="r:gz")
except tarfile.ReadError:
content = tarfile.open(keyfile)
members = content.getmembers()
member_dict = {m.name.split('/')[-1].rstrip('.json'): m for m in members}
res[key] = {}
res[key]['tarfile'] = content
res[key]['members'] = member_dict
return res
def get_tarfile(tarfiles, subset, sha):
tar_subset = tarfiles.get(subset, None)
if tar_subset is None:
return
content = tarfiles[subset]['tarfile']
member = tarfiles[subset]['members'][sha]
return json.load(content.extractfile(member))
def process_chunk(chunk: pd.DataFrame, tarfiles: dict) -> tuple:
"""Steps in processing a chunk
1. For each article:
a) Extract metadata:
- cord_uid
- Title
- Authors
- Date
- Journal
- DOI
c) Load JSON article
d) Extract all citation spans. Convert each inline citation to a tuple:
(cord_uid, context, (cited_title, cited_authors, cited_date, cited_journal))
2. Returns a tuple of articles, citation_data
:param chunk: Chunk of the metadata
:return: 'articles', 'citation_data'
"""
articles = []
citation_data = []
with tqdm(total=len(chunk)) as pbar:
for row in chunk.itertuples():
cord_uid = row.cord_uid
sha = row.sha.split('; ')[0]
title = row.title
date = row.publish_time
doi = row.doi
journal = row.journal
subset = row.url
jsondict = get_tarfile(tarfiles, subset, sha)
if jsondict is None:
continue
authors = jsondict['metadata'].get('authors')
auth_idxs = get_author_idxs(authors)
context_citations = get_citation_data(cord_uid, jsondict, auth_idxs)
articles.append((cord_uid, title, auth_idxs, date, journal, doi))
citation_data.extend(context_citations)
pbar.update()
return articles, citation_data
def get_author_idxs(authors: list) -> list:
"""Return a list of author idxs for the authors in `authors`.
Adds the authors to the vocab in the process.
:param authors: List of authors (list[str])
:return: 'auth_idxs' (list[int])
"""
global author_vocab
auth_idxs = []
for author in authors:
auth_abbrev = f'{author["first"][:1]} {author["last"]}'
if auth_abbrev in author_vocab:
auth_idx = author_vocab[auth_abbrev]
else:
auth_idx = len(author_vocab)
author_vocab[auth_abbrev] = auth_idx
auth_idxs.append(auth_idx)
return auth_idxs
def get_token_idx(token: str) -> int:
"""Get the token id for a given token, adding it to the vocab if necessary.
Parameters
----------
token {str} -- Token to add to the vocab
Returns
-------
'id' {int}
"""
global curr_token_idx, token_vocab
if token not in token_vocab:
token_vocab[token] = curr_token_idx
curr_token_idx += 1
return token_vocab[token]
def get_title_idx(token: str) -> int:
"""Get the token id for a given title token, adding it to the vocab
if necessary.
Parameters
----------
token {str} -- Token to add to the vocab
Returns
-------
'id' {int}
"""
global curr_title_idx, title_vocab
if token not in title_vocab:
title_vocab[token] = curr_title_idx
curr_title_idx += 1
return title_vocab[token]
def get_citation_data(cord_uid: str, article: dict, citing_auth_idxs: list) -> list:
"""Get the citation data for a given article (in dict format)
:param cord_uid: The UID of the article from the CORD-19 database
:param article: The article to be parsed
:param citing_auth_idxs: The citing authors (encoded)
:return: 'citation_data', a list of tuples:
(cord_uid, context, (cited_title, cited_authors, cited_date, cited_journal))
"""
bib_entries = article['bib_entries']
body_text = article['body_text']
citation_data = []
text_sections = [section['text'] for section in body_text]
text_sections = list(text.nlp.pipe(text_sections))
for section, text_section in zip(body_text, text_sections):
cite_spans = section['cite_spans']
sent_len = len(text_section)
# Need to loop through the citation spans and get the `CONTEXT_SIZE`
# sentences before or including the citation span
for cite_span in cite_spans:
ref_id = cite_span['ref_id']
if not ref_id:
continue
bibref = bib_entries[ref_id]
authors = bibref['authors']
auth_idxs = get_author_idxs(authors)
title = bibref['title']
title_idxs = [get_title_idx(t.lemma_) for t in text.nlp(title)]
# Finding the context
cite_start = cite_span['start']
cite_end = cite_span['end']
idx_start, idx_end = 0, sent_len
for t in text_section:
if t.idx == cite_start:
idx_start = t.i
if t.idx + len(t) == cite_end:
idx_end = t.i
# Fitting the window into the section
            context_size_pre, context_size_post = CONTEXT_SIZE // 2, CONTEXT_SIZE // 2
if idx_start < context_size_pre:
context_size_post += idx_start - context_size_pre
context_size_pre = idx_start
if sent_len - idx_end < context_size_post:
context_size_post = sent_len - idx_end
# Getting the context
context_pre = list([get_token_idx(t.lemma_) for t in text_section[idx_start - context_size_pre:idx_start]])
context_post = list([get_token_idx(t.lemma_) for t in text_section[idx_end:idx_end + context_size_post]])
context = context_pre + [CITE_IDX] + context_post
# Packaging it all together
citation_id = f'{cord_uid}__{ref_id}'
datum = (citation_id, context, auth_idxs, citing_auth_idxs, title_idxs)
citation_data.append(datum)
return citation_data
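# Illustrative sketch (not part of the original pipeline): the context-window
# clamping used in get_citation_data, extracted for clarity. Token indices in the
# example comment are hypothetical.
def _demo_context_window(idx_start, idx_end, sent_len, context_size=CONTEXT_SIZE):
    pre, post = context_size // 2, context_size // 2
    if idx_start < pre:
        post += idx_start - pre  # note: the right-hand window shrinks symmetrically
        pre = idx_start
    if sent_len - idx_end < post:
        post = sent_len - idx_end
    return pre, post  # e.g. a citation at token 3 of a 200-token section -> (3, 3)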
@utils.time_func
def main():
CHUNK_SIZE = 10000
offset = 0
total_articles = 0
total_citations = 0
chunk_idx = -1
fp_articles = ARTICLES_FILE.open('w')
fp_citations = CITATIONS_FILE.open('w')
articles_writer = csv.writer(fp_articles)
citations_writer = csv.writer(fp_citations)
articles_writer.writerow(ARTICLES_NAMES)
citations_writer.writerow(CITATIONS_NAMES)
# Main loop to process in chunks
print("Loading TAR files")
tarfiles = load_tar_files()
print("Beginning processing")
try:
while True:
metadata_chunk = load_metadata(offset, chunk_size=CHUNK_SIZE)
chunk_idx += 1
if metadata_chunk is None:
break
if len(metadata_chunk) == 0:
# print(f'Skipping chunk {chunk_idx} with length 0')
continue
print(f'Processing chunk {chunk_idx}')
all_data = process_chunk(metadata_chunk, tarfiles)
articles, citation_data = all_data
articles_writer.writerows(articles)
citations_writer.writerows(citation_data)
total_articles += len(articles)
total_citations += len(citation_data)
print(f'Processed {len(articles)} articles; Total articles processed: {total_articles}; Total citations processed: {total_citations}')
offset += CHUNK_SIZE
except KeyboardInterrupt:
pass
finally:
print(f"Done. Processed {total_articles} total articles with {total_citations} citations.")
fp_articles.close()
fp_citations.close()
for vocab, file in zip((author_vocab, token_vocab, title_vocab),
(AUTHOR_VOCAB_FILE, TOKEN_VOCAB_FILE, TITLE_VOCAB_FILE)):
idx2vocab = {v: k for k, v in vocab.items()}
with file.open('w') as f:
json.dump(idx2vocab, f)
if __name__ == '__main__':
main()
| 2.515625 | 3 |
rpn/app.py | cleyon/rpn | 1 | 12790382 | '''
#############################################################################
#
# M A I N L O O P & P R I M A R Y F U N C T I O N S
#
#############################################################################
'''
import getopt
import os
import random
import signal
import sys
try:
import numpy as np # pylint: disable=import-error
except ImportError:
pass
# Check if SciPy is available
try:
import scipy.integrate # pylint: disable=import-error
import scipy.optimize # pylint: disable=import-error
except ModuleNotFoundError:
pass
# # Check if Matplotlib is available
# try:
# import matplotlib # pylint: disable=import-error
# except ModuleNotFoundError:
# pass
from rpn.debug import dbg, typename
from rpn.exception import *
import rpn.flag
import rpn.globl
import rpn.tvm
import rpn.type
import rpn.unit
import rpn.util
import rpn.word
disable_all_extensions = False
load_init_file = True
want_debug = False
def usage():
print("""\
Usage: rpn [-d] [-f FILE] [-i] [-l FILE] [-q] [-V] cmds...
-d Enable debugging
-f FILE Load FILE and exit
-i Force interactive mode
-l FILE Load FILE and continue
-q Do not load init file (~/.rpnrc)
-Q Disable all extensions (implies -q)
-V Display version information""")
sys.exit(64) # EX_USAGE
def initialize(rpndir, argv):
global disable_all_extensions # pylint: disable=global-statement
global load_init_file # pylint: disable=global-statement
# Set up low level stuff, stacks, variables
sys.setrecursionlimit(2000) # default is 10002
random.seed()
rpn.globl.push_scope(rpn.globl.root_scope, "Root scope")
rpn.globl.disp_stack.push(rpn.util.DisplayConfig())
rpn.word.w_std('std')
rpn.unit.define_units()
define_variables()
# Set up signal handling
signal.signal(signal.SIGINT, sigint_handler)
signal.signal(signal.SIGQUIT, sigquit_handler)
signal.signal(signal.SIGWINCH, sigwinch_handler)
sigwinch_handler(0, 0) # Read & define ROWS and COLS via stty(1)
# Set initial conditions
rpn.globl.eval_string("clreg clflag clfin")
rpn.flag.set_flag(rpn.flag.F_SHOW_PROMPT)
# Define built-in secondary (protected) words
if not disable_all_extensions:
try:
load_file(os.path.join(rpndir, "secondary.rpn"))
except RuntimeErr as err_f_opt:
rpn.globl.lnwriteln(str(err_f_opt))
sys.exit(1)
# Switch to user mode, where words and variables are no longer
# protected, and define built-in tertiary (non-protected) words
rpn.globl.default_protected = False
if not disable_all_extensions:
try:
load_file(os.path.join(rpndir, "tertiary.rpn"))
except RuntimeErr as err_f_opt:
rpn.globl.lnwriteln(str(err_f_opt))
sys.exit(1)
# Parse command line
argv = parse_args(argv)
# Hopefully load the user's init file
if load_init_file:
init_file = os.path.expanduser("~/.rpnrc")
if os.path.isfile(init_file):
(rpnrc, _) = rpn.globl.lookup_variable('RPNRC')
rpnrc.obj = rpn.type.String(init_file)
load_file(init_file)
# rpn.globl.lnwriteln("--------------------------------")
if len(argv) > 0:
s = " ".join(argv)
rpn.globl.eval_string(s)
if rpn.globl.interactive is None:
rpn.globl.interactive = False
else:
# No command args, so maybe go interactive
if rpn.globl.interactive is None:
rpn.globl.interactive = True
return rpn.globl.interactive
def define_variables():
# Variables defined here are all protected=True by default
rpn.globl.sharpout = rpn.globl.defvar('#OUT', rpn.type.Integer(0),
readonly=True, noshadow=True)
rpn.tvm.CF = rpn.globl.defvar('CF', rpn.type.Integer(1),
noshadow=True,
pre_hooks=[pre_require_int, pre_require_positive],
post_hooks=[post_label_with_identifier],
doc="Compounding Frequency")
rpn.globl.scr_cols = rpn.globl.defvar('COLS', rpn.type.Integer(0),
pre_hooks=[pre_require_int, pre_require_positive])
rpn.tvm.FV = rpn.globl.defvar('FV', None,
noshadow=True,
pre_hooks=[pre_require_int_or_float],
post_hooks=[post_label_with_identifier],
doc="Future Value")
rpn.tvm.INT = rpn.globl.defvar('INT', None,
noshadow=True,
pre_hooks=[pre_require_int_or_float, pre_require_non_negative],
post_hooks=[post_label_with_identifier],
doc="Interest rate")
rpn.tvm.N = rpn.globl.defvar('N', None,
noshadow=True,
pre_hooks=[pre_require_int_or_float, pre_require_positive],
post_hooks=[post_label_with_identifier],
doc="Number of payments")
rpn.globl.defvar('NUMPY', rpn.type.Integer(rpn.globl.bool_to_int(rpn.globl.have_module('numpy'))),
readonly=True, noshadow=True)
if rpn.globl.have_module('numpy'):
rpn.globl.defvar('NUMPY_VER', rpn.type.String(np.__version__),
readonly=True)
rpn.tvm.PF = rpn.globl.defvar('PF', rpn.type.Integer(1),
noshadow=True,
pre_hooks=[pre_require_int, pre_require_positive],
post_hooks=[post_label_with_identifier],
doc="Payment Frequency")
rpn.tvm.PMT = rpn.globl.defvar('PMT', None,
noshadow=True,
pre_hooks=[pre_require_int_or_float],
post_hooks=[post_label_with_identifier],
doc="Payment amount")
rpn.tvm.PV = rpn.globl.defvar('PV', None,
noshadow=True,
pre_hooks=[pre_require_int_or_float],
post_hooks=[post_label_with_identifier],
doc="Present Value")
rpn.globl.scr_rows = rpn.globl.defvar('ROWS', rpn.type.Integer(0),
pre_hooks=[pre_require_int, pre_require_positive])
rpn.globl.defvar('RPNRC', rpn.type.String(""),
readonly=True, hidden=True)
rpn.globl.defvar('SCIPY', rpn.type.Integer(rpn.globl.bool_to_int(rpn.globl.have_module('scipy'))),
readonly=True, noshadow=True)
if rpn.globl.have_module('scipy'):
rpn.globl.defvar('SCIPY_VER', rpn.type.String(scipy.__version__),
readonly=True)
rpn.globl.defvar('SIZE', rpn.type.Integer(20),
noshadow=True,
pre_hooks=[pre_validate_size_arg],
post_hooks=[post_clear_newly_unveiled_registers])
rpn.globl.defvar('Sreg', rpn.type.Integer(11),
pre_hooks=[pre_validate_Sreg_arg])
rpn.globl.defvar('VER', rpn.type.Float(rpn.globl.RPN_VERSION),
readonly=True, noshadow=True)
def parse_args(argv):
global want_debug # pylint: disable=global-statement
global load_init_file # pylint: disable=global-statement
global disable_all_extensions # pylint: disable=global-statement
try:
opts, argv = getopt.getopt(argv, "dDf:il:qQV")
except getopt.GetoptError as e:
print(str(e)) # OK
usage()
for opt, arg in opts:
if opt == "-d": # Sets debug only when main_loop is ready
want_debug = True
elif opt == "-D":
rpn.flag.set_flag(rpn.flag.F_DEBUG_ENABLED) # Debug immediately, useful for built-in words
elif opt == "-f":
if rpn.globl.interactive is None:
rpn.globl.interactive = False
try:
load_file(arg)
except RuntimeErr as err_f_opt:
rpn.globl.lnwriteln(str(err_f_opt))
elif opt == "-i":
rpn.globl.interactive = True
elif opt == "-l":
try:
load_file(arg)
except RuntimeErr as err_l_opt:
rpn.globl.lnwriteln(str(err_l_opt))
elif opt == "-q":
load_init_file = False
elif opt == "-Q":
load_init_file = False
disable_all_extensions = True
elif opt == "-V":
rpn.globl.show_version_info()
if rpn.globl.interactive is None:
rpn.globl.interactive = False
else:
print("Unhandled option {}".format(opt)) # OK
sys.exit(1)
return argv
def load_file(filename):
fn = filename
if not os.path.isfile(fn):
fn += ".rpn"
if not os.path.isfile(fn):
throw(X_NON_EXISTENT_FILE, "load", filename)
try:
with open(fn, "r") as file:
contents = file.read()
except PermissionError as e:
throw(X_FILE_IO, "load", "Cannot open file '{}'".format(fn))
else:
dbg("load_file", 3, "load_file({})='{}'".format(fn, contents))
rpn.globl.eval_string(contents)
def main_loop():
global want_debug # pylint: disable=global-statement
global disable_all_extensions # pylint: disable=global-statement
rpn.flag.clear_flag(rpn.flag.F_SHOW_X) # Reset, because some argv may have set it to True
# Non-existence of ~/.rpnrc is indicator of novice mode
(rpnrc, _) = rpn.globl.lookup_variable("RPNRC")
if len(rpnrc.obj.value) == 0 and not disable_all_extensions:
rpn.globl.lnwriteln("Type ? for information, help <word> for help on a specific word.")
rpn.globl.lnwriteln("Type vlist for a list of all words, vars to see your variables.")
rpn.globl.lnwriteln("Type .s to display the stack non-destructively, and bye to exit.")
if not rpn.globl.param_stack.empty():
if rpn.globl.param_stack.size() == 1:
rpn.globl.eval_string("dup . cr")
else:
rpn.word.w_dot_s('.s')
if want_debug:
rpn.flag.set_flag(rpn.flag.F_DEBUG_ENABLED)
while True:
try:
(error, tok_list) = generate_token_list()
except StopIteration:
return
except TopLevel:
continue
if error is True:
rpn.globl.lnwriteln("main_loop: Parse error: Could not get next token")
s = " ".join([t.value for t in tok_list])
dbg("parse", 1, "s='{}'".format(s))
rpn.globl.eval_string(s)
def end_program():
if rpn.globl.sharpout.obj.value != 0:
rpn.globl.writeln()
if not rpn.globl.string_stack.empty():
if rpn.globl.string_stack.size() == 1:
rpn.word.w_dollar_dot('$.')
rpn.word.w_cr('cr')
else:
rpn.globl.lnwriteln("Strings:")
rpn.word.w_dollar_dot_s('$.s')
if not rpn.globl.param_stack.empty():
if rpn.globl.param_stack.size() == 1:
rpn.word.w_dot('.')
rpn.word.w_cr('cr')
else:
rpn.globl.lnwriteln("Stack:")
rpn.word.w_dot_s('.s')
def generate_token_list():
'''Returns a tuple (flag, list)
flag is True if initial parse error, False if no error'''
initial_parse_error = False
rpn.globl.parse_stack.clear()
tok_list = []
depth = {
'BRACKET' : 0,
'PAREN' : 0
}
while True:
# Get next token
tok = next(rpn.util.TokenMgr.next_token())
dbg("token", 1, "token({},{})".format(tok.type, repr(tok.value)))
# See if it's an immediate word; if so, call it
if tok.type == 'IDENTIFIER':
(word, _) = rpn.globl.lookup_word(tok.value)
if word is not None and word.immediate():
dbg("token", 3, "Word {} is immediate, calling...".format(word))
word.__call__(word.name)
continue
tok_list.append(tok)
# These need a second token or they will be very angry
elif tok.type in ['AT_SIGN', 'CATCH', 'CONSTANT', 'EXCLAM', 'FORGET',
'HELP', 'HIDE', 'SHOW', 'UNDEF', 'VARIABLE' ]:
rpn.globl.parse_stack.push(tok.type)
try:
tok2 = next(rpn.util.TokenMgr.next_token())
dbg("token", 1, "token({},{})".format(tok2.type, repr(tok2.value)))
except StopIteration:
initial_parse_error = True
dbg("token", 1, "{}: No more tokens, exiting".format(tok.type))
break
finally:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
tok_list.append(tok2)
elif tok.type in ['OPEN_BRACKET', 'CLOSE_BRACKET',
'OPEN_PAREN', 'CLOSE_PAREN']:
tok_list.append(tok)
# borp == "bracket or paren"
(open_close, borp) = tok.type.split("_")
#print("borp={}".format(borp))
if borp == 'PAREN':
c = '('
elif borp == 'BRACKET':
c = '['
if open_close == 'OPEN':
if borp == 'PAREN' and depth[borp] > 0:
rpn.globl.lnwriteln("{}: Embedded {} not allowed".format(tok.type, c))
initial_parse_error = True
else:
rpn.globl.parse_stack.push(c)
depth[borp] += 1
if open_close == 'CLOSE':
if rpn.globl.parse_stack.empty() or \
borp == 'BRACKET' and rpn.globl.parse_stack.top() != c or \
borp == 'PAREN' and rpn.globl.parse_stack.top() != '(,':
rpn.globl.lnwriteln("{}: {} lost".format(tok.type, c))
initial_parse_error = True
else:
rpn.globl.parse_stack.pop()
depth[borp] -= 1
elif tok.type == 'COMMA':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != '(':
rpn.globl.lnwriteln("{}: no matching (".format(tok.type))
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
rpn.globl.parse_stack.push('(,')
elif tok.type in ['BEGIN', 'CASE', 'COLON', 'DO', 'IF']:
tok_list.append(tok)
rpn.globl.parse_stack.push(tok.type)
elif tok.type in ['AGAIN', 'UNTIL']:
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'BEGIN':
rpn.globl.lnwriteln("{}: no matching BEGIN".format(tok.type))
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
elif tok.type == 'ELSE':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'IF':
rpn.globl.lnwriteln("ELSE: no matching IF")
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
rpn.globl.parse_stack.push(tok.type)
elif tok.type == 'ENDCASE':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() not in ['CASE', 'OTHERWISE']:
rpn.globl.lnwriteln("ENDCASE: no matching CASE")
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
elif tok.type == 'ENDOF':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'OF':
rpn.globl.lnwriteln("ENDOF: no matching OF")
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
rpn.globl.parse_stack.push('CASE')
elif tok.type == 'ERROR':
rpn.globl.lnwriteln("ERROR {}".format(tok))
initial_parse_error = True
elif tok.type in ['LOOP', 'PLUS_LOOP']:
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'DO':
rpn.globl.lnwriteln("{}: no matching DO".format(tok.type))
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
elif tok.type in ['OF', 'OTHERWISE']:
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'CASE':
rpn.globl.lnwriteln("{}: no matching CASE".format(tok.type))
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
rpn.globl.parse_stack.push(tok.type)
elif tok.type == 'REPEAT':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'WHILE':
rpn.globl.lnwriteln("REPEAT: no matching WHILE")
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
elif tok.type == 'SEMICOLON':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'COLON':
rpn.globl.lnwriteln("SEMICOLON: no matching COLON")
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
elif tok.type == 'THEN':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() not in ['IF', 'ELSE']:
rpn.globl.lnwriteln("THEN: no matching IF")
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
elif tok.type == 'WHILE':
if rpn.globl.parse_stack.empty() or rpn.globl.parse_stack.top() != 'BEGIN':
rpn.globl.lnwriteln("WHILE: no matching BEGIN")
initial_parse_error = True
else:
tok_list.append(tok)
rpn.globl.parse_stack.pop()
rpn.globl.parse_stack.push(tok.type)
else:
# 'ABORT_QUOTE',
# 'DOC_STR',
# 'DOT_QUOTE',
# 'VBAR',
# 'WS',
tok_list.append(tok)
# Here's what breaks the while True loop, sauf StopIteration
if rpn.globl.parse_stack.empty() and depth['PAREN'] == 0 and depth['BRACKET'] == 0:
break
return (initial_parse_error, tok_list)
# Simple SIGWINCH handler can become overwhelmed and crash if window
# changes come too fast. Consider using shutil.get_terminal_size()
def sigwinch_handler(_signum, _frame):
rpn.globl.update_screen_size()
def sigint_handler(_signam, _frame):
rpn.globl.sigint_detected = True
# It is NOT safe to do I/O inside a signal handler.
# Can crash with error:
# RuntimeError: reentrant call inside <_io.BufferedWriter name='<stdout>'>
# sys.stderr.write("^C")
# sys.stderr.flush()
# rpn.globl.eval_string("?cr")
throw(X_INTERRUPT)
def sigquit_handler(_signum, _frame):
rpn.globl.lnwriteln("[Quit]")
raise EndProgram()
# def example_pre_hook_func(ident, cur_obj, new_obj):
# print("example_pre_hook_func:")
# print("ident ={}".format(ident))
# print("cur_obj={}".format(repr(cur_obj)))
# print("new_obj={}".format(repr(new_obj)))
# # Check against None first due to undef case
# if new_obj is not None and new_obj.value < 0:
# throw(X_INVALID_ARG, "!{}".format(identifier), "Must be positive")
#
# def example_post_hook_func(ident, old_obj, cur_obj):
# print("example_post_hook_func:")
# print("ident ={}".format(ident))
# print("old_obj={}".format(repr(old_obj)))
# print("cur_obj={}".format(repr(cur_obj)))
def pre_require_int(identifier, _cur, new):
if type(new) is not rpn.type.Integer:
throw(X_ARG_TYPE_MISMATCH, "!{}".format(identifier), "({})".format(typename(new)))
def pre_require_int_or_float(identifier, _cur, new):
if type(new) not in [rpn.type.Integer, rpn.type.Float]:
throw(X_ARG_TYPE_MISMATCH, "!{}".format(identifier), "({})".format(typename(new)))
def pre_require_positive(identifier, _cur, new):
if new.value <= 0:
throw(X_INVALID_ARG, "!{}".format(identifier), "Must be positive")
def pre_require_non_negative(identifier, _cur, new):
if new.value < 0:
throw(X_INVALID_ARG, "!{}".format(identifier), "Must be non-negative")
def pre_validate_Sreg_arg(identifier, _cur, new):
if type(new) is not rpn.type.Integer:
throw(X_ARG_TYPE_MISMATCH, "!{}".format(identifier), "({})".format(typename(new)))
new_Sreg = new.value
(reg_size, _) = rpn.globl.lookup_variable("SIZE")
if new_Sreg < 0 or new_Sreg > reg_size.obj.value - 6:
throw(X_INVALID_ARG, "!{}".format(identifier), "Sreg {} out of range (0..{} expected); check SIZE".format(new_Sreg, reg_size.obj.value - 6))
def pre_validate_size_arg(identifier, _cur, new):
if type(new) is not rpn.type.Integer:
throw(X_ARG_TYPE_MISMATCH, "!{}".format(identifier), "({})".format(typename(new)))
new_size = new.value
if new_size < rpn.globl.REG_SIZE_MIN or new_size > rpn.globl.REG_SIZE_MAX:
throw(X_INVALID_ARG, "!{}".format(identifier), "Size {} out of range ({}..{} expected)".format(new_size, rpn.globl.REG_SIZE_MIN, rpn.globl.REG_SIZE_MAX))
(reg_Sreg, _) = rpn.globl.lookup_variable("Sreg")
if new_size < reg_Sreg.obj.value + 6:
throw(X_INVALID_ARG, "!{}".format(identifier), "Size {} too small for Sreg ({})".format(new_size, reg_Sreg.obj.value))
def post_clear_newly_unveiled_registers(_identifier, old, cur):
old_size = old.value
cur_size = cur.value
# If we're increasing the number of registers, zero out the newly
# available ones. It is not really necessary to do this when
# decreasing, because those registers will no longer be accessible.
if cur_size > old_size:
for r in range(cur_size - old_size):
rpn.globl.register[old_size + r] = rpn.type.Float(0.0)
def post_label_with_identifier(identifier, _old, cur):
cur.label = identifier
| 1.914063 | 2 |
multi_tenant/tenant/models/tenant.py | AnsGoo/djangoMultiTenant | 1 | 12790383 | from datetime import datetime
from typing import Dict, Tuple
from django.db import models
from multi_tenant.tenant.utils.pycrypt import crypt
from multi_tenant.const import DEFAULT_DB_ENGINE_MAP
from django.conf import settings
DAFAULT_DB = settings.DATABASES['default']
class TenantManager(models.Manager):
    def create_tenant(self, code, name, **kwargs):
if not code:
raise ValueError('The given code must be set')
if not name:
raise ValueError('The given name must be set')
password = kwargs.pop('db_password',None)
tenant = self.model(code=code, name=name, **kwargs)
if password:
tenant.db_password = crypt.encrypt(password)
tenant.save(using=self._db)
return tenant
class AbstractTenant(models.Model):
Mysql, SQLite, Postgres, Oracle = ('Mysql', 'SQLite3', 'Postgres', 'Oracle')
engine_choices = (
(Mysql, Mysql),
(SQLite, SQLite),
(Postgres, Postgres),
(Oracle, Oracle),
)
create_date: datetime = models.DateTimeField(auto_now_add=True)
name: str = models.CharField(max_length=20, unique=True)
label: str = models.CharField(max_length=200)
code: str = models.CharField(max_length=10, unique=True)
db_password: str = models.CharField(max_length=128, null=True, blank=True)
db_name: str = models.CharField(max_length=50)
engine: str = models.CharField(max_length=10, null=True, blank=True, choices=engine_choices)
options: str = models.JSONField(null=True, blank=True)
is_active: bool = models.BooleanField(default=True)
_password = None
CODE_FIED = 'code'
objects = TenantManager()
def __str__(self) -> str:
return self.name
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
if self._password is not None:
if self.db_password:
raw_password = <PASSWORD>(self.db_password)
self.db_password = <PASSWORD>
self._password = None
self.save()
def delete(self, using: str=None, keep_parents: bool=False, force: bool = False) -> Tuple[int, Dict[str, int]]:
if force:
super().delete(using,keep_parents)
else:
raise PermissionError(f'{self.code} can not delete')
def create_database(self) -> bool:
from multi_tenant.tenant.utils.db import MutlTenantOriginConnection
if self.engine.lower() == self.SQLite.lower():
connection = MutlTenantOriginConnection().create_connection(tentant=self, popname=False)
return True
elif self.engine.lower() == self.Postgres.lower():
connection = MutlTenantOriginConnection().create_connection(tentant=self, popname=True, **{'NAME':'postgres'})
else:
connection = MutlTenantOriginConnection().create_connection(tentant=self, popname=True)
create_database_sql = self.create_database_sql
if create_database_sql:
with connection.cursor() as cursor:
cursor.execute(create_database_sql)
return True
class Meta:
db_table = 'auth_tenant'
        verbose_name = 'Tenant'
        verbose_name_plural = 'Tenants'
abstract = True
def get_db_config(self) -> Dict:
if self.engine:
engine_name = self.engine.lower()
else:
default_engine = DAFAULT_DB['ENGINE']
engine_name = self.inject_engine(default_engine)
if hasattr(self,f'_create_{engine_name}_dbconfig'):
return getattr(self,f'_create_{engine_name}_dbconfig')()
else:
            raise NotImplementedError(f'create_{engine_name}_dbconfig is not implemented')
@staticmethod
def inject_engine(name):
for key ,value in DEFAULT_DB_ENGINE_MAP.items():
if name == value:
return key
def _create_common_dbconfig(self) -> Dict:
        password = DAFAULT_DB['PASSWORD']
engine = self.get_engine()
options = self.options
if not self.options:
options = dict()
if self.db_password:
password = <PASSWORD>(self.db_password)
return {
'ENGINE': engine,
'NAME': self.db_name,
'USER': options.pop('user', DAFAULT_DB['USER']),
'PASSWORD': password,
'HOST': options.pop('host', DAFAULT_DB['HOST']),
'PORT': options.pop('port', DAFAULT_DB['PORT']),
**options
}
def _create_sqlite3_dbconfig(self) -> Dict:
engine = self.get_engine()
return {
'ENGINE': engine,
'NAME': settings.BASE_DIR.joinpath(self.db_name)
}
def _create_mysql_dbconfig(self) -> Dict:
return self._create_common_dbconfig()
def _create_postgres_dbconfig(self) -> Dict:
return self._create_common_dbconfig()
def _create_oracle_dbconfig(self,) -> Dict:
return self._create_common_dbconfig()
def get_engine(self) -> str:
engine = DAFAULT_DB['ENGINE']
if self.engine:
engine = DEFAULT_DB_ENGINE_MAP.get(self.engine.lower())
if not engine:
            raise ValueError(f'unknown engine {self.engine}, engine must be in {list(DEFAULT_DB_ENGINE_MAP.keys())}')
return engine
@property
def create_database_sql(self) -> str:
engine_name = self.engine.lower()
if hasattr(self,f'_create_{engine_name}_database'):
return getattr(self,f'_create_{engine_name}_database')()
else:
            raise NotImplementedError(f'_create_{engine_name}_database is not implemented')
def _create_sqlite3_database(self) -> str:
pass
def _create_mysql_database(self) -> str:
return f"CREATE DATABASE IF NOT EXISTS {self.db_name} character set utf8;"
def _create_postgres_database(self) -> str:
return f"CREATE DATABASE \"{self.db_name}\" encoding 'UTF8';"
def _create_oracle_database(self) -> str:
return f"CREATE DATABASE {self.db_name} DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci;"
class Tenant(AbstractTenant):
pass
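# Illustrative sketch (not part of the original app): one way a freshly created tenant
# could be provisioned and exposed to Django at runtime. The literal values and the
# "DATABASES keyed by tenant code" convention are assumptions, not taken from this project;
# create_tenant, create_database and get_db_config are the methods defined above.
def _example_provision_tenant():
    tenant = Tenant.objects.create_tenant(code='t001', name='demo', db_name='demo.sqlite3', engine='SQLite3')
    tenant.create_database()
    settings.DATABASES[tenant.code] = tenant.get_db_config()
    return tenant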
| 2.1875 | 2 |
src/main.py | GDSC-UIT/RealTime-Emotion-Recognizer | 1 | 12790384 | <gh_stars>1-10
import cv2
import numpy as np
from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing import image
''' Load model '''
trained_model = model_from_json(open("model/vgg-face-model.json", "r").read())
''' Load weights '''
trained_model.load_weights('model/vgg-face.h5')
cap = cv2.VideoCapture(0)
black = np.zeros((96,96))
emotions = ('happy', 'sorrow', 'neutral')
face_haar_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
while True:
ret,test_img = cap.read()
if not ret:
continue
gray_img= cv2.cvtColor(test_img, cv2.COLOR_BGR2GRAY)
faces_detected = face_haar_cascade.detectMultiScale(gray_img, 1.32, 5)
for (x, y, w, h) in faces_detected:
cv2.rectangle(test_img, (x,y), (x+w,y+h), (255,0,0), thickness=4)
        roi_gray = gray_img[y:y+h, x:x+w]  # rows span the height (h), columns span the width (w)
roi_gray = cv2.resize(roi_gray, (48,48))
img_pixels = image.img_to_array(roi_gray)
cv2.imshow('Gray img', roi_gray)
img_pixels = np.expand_dims(img_pixels, axis = 0)
img_pixels /= 255
predictions = trained_model.predict(img_pixels)
''' Find max indexed array '''
max_index = np.argmax(predictions[0])
predicted_emotion = emotions[max_index]
cv2.putText(test_img, predicted_emotion, (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1, (219,68,55), 2)
resized_img = cv2.resize(test_img, (1000, 700))
cv2.imshow('Predicted image', resized_img)
if cv2.waitKey(10) == ord('q'):
break
cap.release()
cv2.destroyAllWindows() | 2.46875 | 2 |
linga/app.py | pageer/Linga | 0 | 12790385 | """Main application initilization."""
import os.path
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_login import LoginManager
BOOK_PATH = os.path.normpath(os.path.join(os.path.dirname(__file__), '..', 'books'))
# Make sure to add your own secret key in config.py
SECRET_KEY = "<KEY>"
# Environment variable for config file name
ENV_KEY = 'LINGA_CONFIG_FILE'
CONFIG_FILE = os.environ[ENV_KEY] if os.environ.get(ENV_KEY) else '../config.py'
SQLALCHEMY_TRACK_MODIFICATIONS = False
app = Flask(__name__) #pylint: disable=invalid-name
app.config.from_object(__name__)
app.config.from_pyfile(CONFIG_FILE, silent=True)
db = SQLAlchemy(app) #pylint: disable=invalid-name
login_manager = LoginManager() #pylint: disable=invalid-name
# Try to accomodate old versions of flask-login
try:
login_manager.init_app(app)
except Exception as ex: #pylint: disable=broad-except
login_manager.setup_app(app)
def get_config(key=''):
"""Get a key value from the app config, or the entire config if no key given."""
if key:
return app.config[key]
return app.config
@app.after_request
def after_request(req):
return req
| 2.46875 | 2 |
models/model_utils.py | ARM-software/sesr | 25 | 12790386 | # Copyright 2021 Arm Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import tensorflow as tf
import numpy as np
from models.quantize_utils import fake_quant_with_min_max_vars_per_channel, fake_quant_with_min_max_vars, compute_ranges
##############################
## LINEAR BLOCK DEFINITIONS ##
##############################
#EXPANDED Linear block
class LinearBlock_e(tf.keras.layers.Layer):
def __init__(self,
in_filters: int,
num_inner_layers: int,
kernel_size: int,
padding: str,
out_filters: int,
feature_size: int,
quant_W: bool,
mode: str):
super().__init__()
"""
Expanded linear block. Input --> 3x3 Conv to expand number of channels
to 'feature_size' --> 1x1 Conv to project channels into 'out_filters'.
At inference time, this can be analytically collapsed into a single,
small 3x3 Conv layer. See also the LinearBlock_c class which is a
very efficient method to train linear blocks without any loss in
image quality.
"""
assert not quant_W, 'expanded linear block not compatible with w quant'
def conv2d(filters: int, kernel_size_: int) -> tf.keras.layers.Layer:
return tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size_, padding=padding)
layers = []
for _ in range(num_inner_layers):
layers.extend([conv2d(filters=feature_size, kernel_size_=kernel_size)])
layers.append(conv2d(filters=out_filters, kernel_size_=1))
self.block = tf.keras.Sequential(layers)
self.mode = mode
def call(self, inputs, training=None, mask=None):
return self.block(inputs, training=training)
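# Hedged sketch (not part of the original SESR code): the analytic collapse described in
# the LinearBlock_e docstring. Both convolutions are linear, so a k x k kernel followed by
# a 1 x 1 projection can be contracted offline into a single k x k kernel (biases ignored).
# Kernel shapes follow TF's HWIO layout; the helper name and arguments are illustrative.
def _collapse_kxk_then_1x1(w_kxk, w_1x1):
    # w_kxk: (k, k, in_ch, feature_size), w_1x1: (1, 1, feature_size, out_ch)
    w_proj = tf.squeeze(w_1x1, axis=[0, 1])           # (feature_size, out_ch)
    return tf.einsum('hwif,fo->hwio', w_kxk, w_proj)  # (k, k, in_ch, out_ch)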
#COLLAPSED Linear block
class LinearBlock_c(tf.keras.layers.Layer):
def __init__(self,
in_filters: int,
num_inner_layers: int,
kernel_size: int,
padding: str,
out_filters: int,
feature_size: int,
quant_W: bool,
mode: str):
tf.keras.layers.Layer.__init__(self)
"""
This is a simulated linear block in the train path. The idea is to collapse
linear block at each training step to speed up the forward pass. The backward
pass still updates all the expanded weights.
After training is completed, the weight generation ops are replaced by
a tf.constant at pb/tflite generation time.
----------------------------------------------------------------
| padded_identity |
| | |
| conv1x1(inCh, r*inCh) [optional] |
| | |
| convkxk(r*inCh, r*inCh) |
| | |
| conv1x1(r*inCh, outCh) |
| | |
| simulating residual: identity -> + |
| (or) padded_conv1x1_wt | (weight_tensor generated)|
----------------------------------------------------------------
|
input_tensor -> Actual convkxk(inCh, outCh)
|
Final output
"""
def conv2d(filters: int, kernel_size_: int, padding_: str) -> tf.keras.layers.Layer:
return tf.keras.layers.Conv2D(filters=filters, kernel_size=kernel_size_, padding=padding_)
# Params
self.in_filters = in_filters
self.out_filters = out_filters
self.feature_size = feature_size
self.quant_W = quant_W
self.mode = mode
# If num_inner_layers > 1, then use another conv1x1 at the beginning
onebyone = True if num_inner_layers > 1 else False
# expansion with kx,ky kernel and then project to out_filters using 1x1
kernel_size = [kernel_size, kernel_size]
self.kx, self.ky = kernel_size
# Learnable Collapse Conv's
conv1 = conv2d(feature_size, [1, 1], "valid")
conv2 = conv2d(feature_size, kernel_size, "valid")
conv3 = conv2d(out_filters, [1, 1], "valid")
self.collapsed_weights = None
# Define Collapse Block
if onebyone:
self.collapse = tf.keras.Sequential([conv1, conv2, conv3])
else:
self.collapse = tf.keras.Sequential([conv2, conv3])
if self.mode == 'train':
self.fake_quant_with_min_max_vars_per_channel_fn = \
fake_quant_with_min_max_vars_per_channel
elif self.mode == 'infer':
self.fake_quant_with_min_max_vars_per_channel_fn = \
tf.quantization.fake_quant_with_min_max_vars_per_channel
def build(self, input_shapes):
# shape: (in_filters,in_filters)
delta = tf.eye(self.in_filters)
# expanded shape:(in_filters, 1, 1, in_filters)
delta = tf.expand_dims(tf.expand_dims(delta, 1), 1)
# padded shape: (in_filters, kx, ky, in_filters)
delta = tf.pad(delta, paddings=[[0, 0], [self.kx - 1, self.kx - 1], [self.ky - 1, self.ky - 1], [0, 0]])
# Ensure the Value isn't trainable
self.delta = tf.Variable(initial_value=delta, trainable=False, dtype=tf.float32)
if self.quant_W:
self.wt_quant_min = self.add_weight(
name='wt_quant_min',
shape=(self.out_filters,),
trainable=True)
self.wt_quant_max = self.add_weight(
name='wt_quant_max',
shape=(self.out_filters,),
trainable=True)
if self.mode == "train":
self.wt_quant_initialized = tf.Variable(False, trainable=False)
# Calculate Residual
kernel_dim = [self.kx, self.ky, self.in_filters, self.out_filters]
residual = np.zeros(kernel_dim, dtype=np.float32)
if self.in_filters == self.out_filters:
mid_kx = int(self.kx / 2)
mid_ky = int(self.ky / 2)
for out_ch in range(self.out_filters):
residual[mid_kx, mid_ky, out_ch, out_ch] = 1.0
# Ensure the Value isn't trainable
self.residual = tf.Variable(initial_value=residual, trainable=False, dtype=tf.float32)
def init_wt_quant_ranges(self, kernel: tf.Tensor) -> None:
quant_max, quant_min = compute_ranges(kernel, per_channel=True, symmetric=True)
self.wt_quant_max.assign(quant_max)
self.wt_quant_min.assign(quant_min)
self.wt_quant_initialized.assign(True)
def call(self, inputs):
if self.mode == "train" or (self.collapsed_weights is None):
# Run Through Conv2D's - online linear collapse
wt_tensor = self.collapse(self.delta)
# reverse order of elements in 1,2 axes
wt_tensor = tf.reverse(wt_tensor, tf.constant([1, 2]))
# (in_filters, kx, ky, out_filters) -> (kx, ky, in_filters, out_filters)
wt_tensor = tf.transpose(wt_tensor, [1, 2, 0, 3])
# Direct-residual addition
# when in_filters != self.out_filters, this is just zeros
wt_tensor += self.residual
if self.mode == "infer":
# store collapsed weights in the first inferece, won't need to collapse again
self.collapsed_weights = tf.Variable(
initial_value=wt_tensor,
trainable=False,
dtype=tf.float32)
# remove references to uncollapsed variables
self.collapse = None
else:
# use pre-collapsed weights
wt_tensor = self.collapsed_weights
if self.mode == "train":
if self.quant_W:
if not self.wt_quant_initialized:
self.init_wt_quant_ranges(wt_tensor)
elif self.mode == "infer":
pass
else:
assert False, self.mode
if self.quant_W:
wt_tensor = self.fake_quant_with_min_max_vars_per_channel_fn(
wt_tensor,
min=self.wt_quant_min,
max=self.wt_quant_max,
num_bits=8,
narrow_range=True)
# Output - the actual conv2d
out = tf.nn.conv2d(inputs, wt_tensor, strides=[1, 1, 1, 1], padding="SAME")
return out
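# Hedged usage sketch (illustrative only, not from the original file): build one collapsed
# linear block and check that shapes are preserved. The argument values below are arbitrary.
if __name__ == '__main__':
    _block = LinearBlock_c(in_filters=16, num_inner_layers=1, kernel_size=3,
                           padding='same', out_filters=16, feature_size=64,
                           quant_W=False, mode='train')
    _y = _block(tf.random.normal([1, 32, 32, 16]))
    print(_y.shape)  # expected: (1, 32, 32, 16)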
| 2.40625 | 2 |
Polynomial Generation.py | Sarder-Iftekhar/Numerical_Method | 0 | 12790387 | <filename>Polynomial Generation.py<gh_stars>0
import numpy as np
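# Illustrative placeholder inputs (not in the original snippet): in the original context
# `x` holds the interpolation nodes and `coeff_vector` the coefficients computed earlier;
# the values below are arbitrary sample data so the fragment has something to operate on.
x = np.array([0.0, 1.0, 2.0])
coeff_vector = np.array([1.0, 2.0, 0.5])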
# create as many polynomials as size of coeff_vector
final_pol = np.polynomial.Polynomial([0.]) # our target polynomial
n = coeff_vector.shape[0] # get number of coeffs
for i in range(n):
p = np.polynomial.Polynomial([1.]) # create a dummy polynomial
for j in range(i):
# each vector has degree of i
# their terms are dependant on 'x' values
p_temp = np.polynomial.Polynomial([-x[j], 1.]) # (x - x_j)
p = np.polymul(p, p_temp) # multiply dummy with expression
p *= coeff_vector[i] # apply coefficient
final_pol = np.polyadd(final_pol, p) # add to target polynomial
p = np.flip(final_pol[0].coef, axis=0)
print(p) | 3.34375 | 3 |
tests/test_parser.py | b2wads/maas | 0 | 12790388 | <filename>tests/test_parser.py
from aioresponses import aioresponses
from asynctest import TestCase
from yarl import URL
from contrib.parser import Plus, Value, Minus, Divide, Times, Exponent
from tests.util import (
plus_service_callback,
minus_service_callback,
divide_service_callback,
multiply_service_callback,
power_service_callback,
)
class ParserValueTest(TestCase):
async def test_value_has_async_eval(self):
v1 = Value("10")
self.assertEqual(10.0, await v1.eval())
class ParserTest(TestCase):
async def setUp(self):
pass
async def test_plus_calls_service(self):
plus = Plus()
plus.addChild(Value("4"))
plus.addChild(Value("10"))
with aioresponses() as rsps:
rsps.post("http://plus.service", callback=plus_service_callback)
result = await plus.eval()
plus_service_call = rsps.requests[
("POST", URL("http://plus.service"))
][0].kwargs["json"]
self.assertEqual(plus_service_call, {"left": 4, "right": 10})
self.assertEqual(14.0, result)
async def test_minus_calls_service(self):
plus = Minus()
plus.addChild(Value("4"))
plus.addChild(Value("10"))
with aioresponses() as rsps:
rsps.post("http://minus.service", callback=minus_service_callback)
result = await plus.eval()
plus_service_call = rsps.requests[
("POST", URL("http://minus.service"))
][0].kwargs["json"]
self.assertEqual(plus_service_call, {"left": 4, "right": 10})
self.assertEqual(-6.0, result)
async def test_divide_calls_service(self):
plus = Divide()
plus.addChild(Value("15"))
plus.addChild(Value("5"))
with aioresponses() as rsps:
rsps.post("http://divide.service", callback=divide_service_callback)
result = await plus.eval()
plus_service_call = rsps.requests[
("POST", URL("http://divide.service"))
][0].kwargs["json"]
self.assertEqual(plus_service_call, {"left": 15, "right": 5})
self.assertEqual(3.0, result)
async def test_multiply_calls_service(self):
plus = Times()
plus.addChild(Value("15"))
plus.addChild(Value("5"))
with aioresponses() as rsps:
rsps.post(
"http://multiply.service", callback=multiply_service_callback
)
result = await plus.eval()
plus_service_call = rsps.requests[
("POST", URL("http://multiply.service"))
][0].kwargs["json"]
self.assertEqual(plus_service_call, {"left": 15, "right": 5})
self.assertEqual(75.0, result)
async def test_power_calls_service(self):
plus = Exponent()
plus.addChild(Value("15"))
plus.addChild(Value("5"))
with aioresponses() as rsps:
rsps.post("http://power.service", callback=power_service_callback)
result = await plus.eval()
plus_service_call = rsps.requests[
("POST", URL("http://power.service"))
][0].kwargs["json"]
self.assertEqual(plus_service_call, {"left": 15, "right": 5})
self.assertEqual(759_375, result)
| 2.75 | 3 |
Python/Weather Station.py | KaushikNeelichetty/IoT-Based-Weather-Station-with-Raspberry-Pi | 3 | 12790389 | <gh_stars>1-10
import urllib2, urllib #The libraries needed for the POST Request
import sys # Importing the sys python package
import Adafruit_BMP.BMP085 as BMP085 #Importing the package needed for the BMP Sensor
import Adafruit_DHT #Importing the package needed for the DHT Sensor
import serial#for interfacing with GPS module as well as the Arduino via Serial
import RPi.GPIO as GPIO#for using the GPIO pins on the board
import os, time#for delay functions
from decimal import *#for precision
GPIO.setmode(GPIO.BOARD)#for setting the gpio header
def find(str, ch):
for i, ltr in enumerate(str):
if ltr == ch:
yield i
def getLocation():
try:
GPSport = serial.Serial("/dev/ttyAMA0", baudrate=9600, timeout=1)#the port to which the gps is connected to
ck=0
fd=''
while ck <= 50:
rcv = GPSport.read(10)
fd=fd+rcv
ck=ck+1
if '$GPRMC' in fd:
ps=fd.find('$GPRMC')
dif=len(fd)-ps
if dif > 50:
data=fd[ps:(ps+50)]
p=list(find(data, ","))
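                # Added note on the NMEA layout being parsed (standard $GPRMC fields): field 3 is
                # latitude as ddmm.mmmm and field 5 is longitude as dddmm.mmmm (fields 4 and 6 are the
                # N/S and E/W hemispheres, which this code does not apply). Below, the minutes part is
                # divided by 60 and added to the degree part to obtain decimal degrees.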
lat=data[(p[2]+1):p[3]]
lon=data[(p[4]+1):p[5]]
s1=lat[2:len(lat)]
s1=Decimal(s1)
s1=s1/60
s11=int(lat[0:2])
s1=s11+s1
s2=lon[3:len(lon)]
s2=Decimal(s2)
s2=s2/60
s22=int(lon[0:3])
s2=s22+s2
s2='{0:0.6f}'.format(s2)
s1='{0:0.6f}'.format(s1)
return str(s1),str(s2)
except:
s2="Data Unavailable"
s1="Data Unavailable"
return str(s1),str(s2)#latitude,longitude
def readDHT():
    #This function will take the readings from the sensor, perform a not-null validation and send the data to the calling function
DHTSensor = Adafruit_DHT.DHT22 # Selecting the type of DHT Sensor
DHTpin=4
DHTHumidity, DHTTemp = Adafruit_DHT.read_retry(DHTSensor, DHTpin)
DHTHumidity='{0:0.2f}'.format(DHTHumidity)
DHTTemp='{0:0.2f}'.format(DHTTemp)
if DHTHumidity is not None and DHTTemp is not None:
return DHTHumidity,DHTTemp
else:
print('Failed to get reading from DHT22. Try again!')
return "Data Unavailable","Data Unavailable"
def readBMP():
    #This function will take the readings from the sensor, perform a not-null validation and send the data to the calling function
BMPSensor = BMP085.BMP085() #Selecting the type of BMP Sensor, the sensor used in my station in BMP180 but the class is only available for BMP085, both use the same class and fucntions and connection circuit
BMPTemp = '{0:0.2f}'.format(BMPSensor.read_temperature())
pressure = '{0:0.2f}'.format(BMPSensor.read_pressure())
altitude = '{0:0.2f}'.format(BMPSensor.read_altitude())
seaLevelPressure = '{0:0.2f}'.format(BMPSensor.read_sealevel_pressure())
if BMPTemp is not None and pressure is not None and altitude is not None and seaLevelPressure is not None:
return BMPTemp,pressure,altitude,seaLevelPressure
else:
print('Failed to get reading from BMP180. Try again!')
return "Data Unavailable","Data Unavailable"
def readArduino():
readSerialSplit=[]
try:
arduinoSerial = serial.Serial('/dev/ttyACM0',9600)# The port to which the arduino is connected to.
readSerial=arduinoSerial.readline()
readSerialSplit=readSerial.split("*")
return readSerialSplit
except:
readSerialSplit=['Data Unavailable','Data Unavailable','Data Unavailable']
return readSerialSplit
def main():
#DHTHumidity,DHTTemp=readDHT()
#BMPTemp,pressure,altitude,seaLevelPressure=readBMP()
#latitude,longitude=getLocation()
#temperature=str(((float(DHTTemp)+float(BMPTemp)))/2)
arduinoReading=readArduino()
rainfall=arduinoReading[0]
light=arduinoReading[1]
co2=arduinoReading[2]
print('Rainfall data from Uno '+rainfall)
print('Light Intensity data from Uno '+light)
print('CO2 Concentration from Uno '+co2)
#weatherData=[('temperature',temperature),('pressure',pressure),('altitude',altitude),('seapressure',seaLevelPressure),('humidity',DHTHumidity),('latitude',latitude),('longitude',longitude),('lightintensity',light),('co2',co2),('rainfall',rainfall)]
#weatherData=urllib.urlencode(weatherData)
#path="http://aashish.noip.me/temperature/index.php"
#request=urllib2.Request(path,weatherData)
#request.add_header("Content-type","application/x-www-form-urlencoded")
#page=urllib2.urlopen(request).read()
#print(' A record has been succesfully updated into the Database ')
while(1):
main()
time.sleep(1)
| 3 | 3 |
08 - Operators and Operands/operator and operands.py | kuyesu/Scripting-With-Python-BIT-II | 1 | 12790390 | """
These are operators in python
-Comparison Operators
-Arithmetic Operators
-Membership Operators
-Assignment Operators
-Logical Operators
"""
"""
Arithmetic Operators
[
+
/
-
*
%
]
"""
""" Examples """
num1 = 2
num2 = 3
sum = num1 + num2
print(sum)
"""
Comparison Operators
<
>
>=
<=
!=
==
"""
""" Examples """
num1 = 2
num2 = 3
sum = num1 > num2
# print("num2 is greater than num2: ", sum)
if num1 > num2:
print("num1 is less than num2")
elif num1 == num2:
print("num2 is equal to num1 ")
elif num1 != num2:
print("num1 is not equal num2")
else:
print("num2 is greater than num1")
"""
Logical Operators
and
not
or
"""
""" Examples """
num1 = 2
num2 = 3
num3 = 4
if num1 > num2 and num2 < num3:
print("num1 is the smallest")
elif num1 > num2 or num2 < num3:
print("num2 is greater than num1 but less than num3")
x = 4
y = 6
z = 8
if x > y and z < y:
print("yes")
elif x > z or y < x:
print("maybe")
else:
print("x is least of all")
print(x in range(3))
print(x in range(6))
"""
Assignment
+=
-=
/=
*=
"""
""" Example """
num1 = 2
num2 = 3
sum = num1 + num2
sum += 9
print(sum)
sum += num1
print(sum)
sum *= num1
print(sum)
sum -= num1
print(sum)
sum /= num1
print(sum)
a = 1
b = 9
c = 10
num = a+b+c
print(num)
num /= c
print(num)
num += a
num *= c
num /= c
print(num)
"""
Membership Operation
is
is not
in
"""
""" Example """
name = "<NAME>"
print("Bam" in name)
if "Zam" in name:
print("Bam is found in name")
else:
print("The letters are not found in name")
action = "Joan is eating beef."
if "eat" in action:
print("eating in progress")
else:
print("nothing") | 4.40625 | 4 |
Callproj/core/views.py | Prakhar-100/DjangoProject | 0 | 12790391 | <reponame>Prakhar-100/DjangoProject
from django.shortcuts import render
from django.http import HttpResponse
# from django.views.decorators.csrf import csrf_exempt
# from twilio.rest import TwilioRestClient as Call
from twilio.rest import Client
# from twilio.twiml.voice_response import VoiceResponse
from core.forms import CallingForm
# Create your views here.
# twiml='<Response><Say>Ahoy there!</Say></Response>'
# @csrf_exempt
# Phone Number SID
# PN45c8ae16547c0c82b1ff6a041945bda2
# MESSAGING_SERVICE_SID = ZS9a111cae1ab7a3573a63226a61ca11a9
def my_call_router():
From_Number = '+12254143610'
To_Number = '+916260336626'
Src_path = "http://demo.twilio.com/docs/voice.xml"
Account_Sid = "<KEY>"
Auth_Token = "<PASSWORD>"
client = Client(Account_Sid, Auth_Token)
print("Call initiated")
call = client.calls.create(
url = Src_path,
to = To_Number,
from_ = From_Number,
)
print(call.sid)
# service = client.proxy.services.create(unique_name='unique_name')
# print(service.sid)
# session = client.proxy.services('KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
# .sessions \
# .create(unique_name='MyFirstSession')
# print(session.sid)
# phone_number = client.proxy.services('ZS9a111cae1ab7a3573a63226a61ca11a9').phone_numbers.create(sid='ACf18711ab713d4b15a1ddc654377761d9')
# print(phone_number.sid)
print("Call has been triggered successfully !")
def index(request):
form = CallingForm()
if request.method == 'POST':
form = CallingForm(request.POST)
if form.is_valid():
name = form.cleaned_data.get('name')
mob = form.cleaned_data.get('mob')
my_call_router()
return render(request, 'core/index.html', {'form': form})
return render(request, 'core/index.html', {'form': form}) | 2.21875 | 2 |
cogs/utils/player.py | FrostiiWeeb/OpenRobot-Bot | 8 | 12790392 | <gh_stars>1-10
# Thanks to https://github.com/Axelware/Life-bot/blob/main/bot/utilities/custom/player.py
from __future__ import annotations
import discord
import asyncio
import yarl
import humanize
import datetime
import slate
import async_timeout
import slate.obsidian
from discord.components import SelectOption
from discord.ext import commands
from .enums import Filters
class Queue(slate.Queue[slate.obsidian.Track]):
def __init__(self, player: Player, /) -> None:
super().__init__()
self.player: Player = player
def put(
self,
items: list[slate.obsidian.Track[commands.Context]]
| slate.obsidian.Track[commands.Context],
/,
*,
position: int | None = None,
) -> None:
super().put(items, position=position)
self.player._queue_add_event.set()
self.player._queue_add_event.clear()
class Player(slate.obsidian.Player["commands.Bot", commands.Context, "Player"]):
def __init__(self, client: commands.Bot, channel: discord.VoiceChannel) -> None:
super().__init__(client, channel)
self.bot = client
self._queue_add_event: asyncio.Event = asyncio.Event()
self._track_end_event: asyncio.Event = asyncio.Event()
self._task: asyncio.Task | None = None
self._text_channel: discord.TextChannel | None = None
self.message: discord.Message | None = None
self.skip_request_ids: set[int] = set()
self.enabled_filters: set[Filters] = set()
self.queue: Queue = Queue(self)
self._volume = 1
@property
def text_channel(self) -> discord.TextChannel | None:
return self._text_channel
@property
def voice_channel(self) -> discord.VoiceChannel:
return self.channel
async def invoke_controller(
self, channel: discord.TextChannel | None = None
) -> discord.Message | None:
if (channel is None and self.text_channel is None) or self.current is None:
return
text_channel = channel or self.text_channel
if text_channel is None:
return
embed = discord.Embed(
title="Now playing:",
description=f"**[{self.current.title}]({self.current.uri})**\nBy **{self.current.author}**",
color=self.bot.color,
).set_thumbnail(url=self.current.thumbnail)
embed.add_field(
name="__Player info:__",
value=f"**Paused:** {self.paused}\n"
f"**Loop mode:** {self.queue.loop_mode.name.title()}\n"
f"**Queue length:** {len(self.queue)}\n"
f"**Queue time:** {humanize.naturaldelta(datetime.timedelta(seconds=sum(track.length for track in self.queue) // 1000))}\n"
if not any([track.is_stream() for track in self.queue])
else "**Queue time:** Unable to determine.",
)
embed.add_field(
name="__Track info:__",
value=f"**Time:** {humanize.naturaldelta(datetime.timedelta(seconds=self.position // 1000))} / {humanize.naturaldelta(datetime.timedelta(seconds=self.current.length // 1000)) if not self.current.is_stream() else 'LIVE'}\n"
f"**Is Stream:** {self.current.is_stream()}\n"
f"**Source:** {self.current.source.value.title()}\n"
f"**Requester:** {self.current.requester.mention if self.current.requester else 'N/A'}\n",
)
if not self.queue.is_empty():
entries = [
f"**{index + 1}.** [{entry.title}]({entry.uri})"
for index, entry in enumerate(list(self.queue)[:3])
]
if len(self.queue) > 3:
entries.append(
f"**...**\n**{len(self.queue)}.** [{self.queue[-1].title}]({self.queue[-1].uri})"
)
embed.add_field(name="__Up next:__", value="\n".join(entries), inline=False)
return await text_channel.send(embed=embed)
async def set_volume(self, volume: int | float):
await self.set_filter(slate.obsidian.Filter(self.filter, volume=volume))
old_volume = self._volume
self._volume = volume
return (old_volume, volume)
@property
def volume(self):
return self._volume
async def set_filter(self, filter: slate.obsidian.Filter, /, *, seek: bool = False):
return await super().set_filter(filter, seek=seek)
async def send(self, *args, **kwargs) -> None:
if not self.text_channel:
return
await self.text_channel.send(*args, **kwargs)
async def search(
self,
query: str,
/,
*,
source: slate.Source,
ctx: commands.Context,
) -> slate.obsidian.SearchResult[commands.Context]:
if (url := yarl.URL(query)) and url.host and url.scheme:
source = slate.Source.NONE
try:
search = await self._node.search(query, ctx=ctx, source=source)
except slate.NoMatchesFound as error:
if error.source:
message = f"No {error.source.value.lower().replace('_', ' ')} {error.search_type.value}s were found for your search."
else:
message = (f"No results were found for your search.",)
raise error
except (slate.SearchError, slate.HTTPError) as exc:
raise exc
return search
async def queue_search(
self,
query: str,
/,
*,
source: slate.Source,
ctx: commands.Context,
now: bool = False,
next: bool = False,
choose: bool = False,
message: discord.Message = None,
delete_message: bool = False,
) -> None:
search = await self.search(query, source=source, ctx=ctx)
if message and delete_message:
try:
await message.delete()
except:
pass
if choose:
entries = []
c = 0
for index, track in enumerate(search.tracks):
if c == 10:
break
entries.append((f"{index + 1:}", track.title, track.uri, track))
c += 1
embed = discord.Embed(
color=self.bot.color,
title="Select the number of the track you want to play.",
)
embed.description = ""
for index, title, url, _ in entries:
embed.description += f"`{index}`. [`{title}`]({url})\n"
class Select(discord.ui.Select):
def __init__(self):
super().__init__(
placeholder="Select an option.",
options=[
SelectOption(label=index + " - " + title, description=url)
for index, title, url, _ in entries
],
)
async def callback(self, interaction: discord.Interaction):
x = discord.utils.find(
lambda option: self.values[0] == option.label, self.options
)
for child in self.view.children:
child.disabled = True
await interaction.response.defer()
await self.view.msg.edit(
view=self.view,
content=f"You selected {x.label} - <{x.description}>.",
)
self.view.stop()
                    self.view.value = (x, entries[int(x.label.split(" - ")[0]) - 1])  # labels are 1-based
return self.view.value
class View(discord.ui.View):
def __init__(self, ctx):
super().__init__(timeout=60)
self.ctx = ctx
self.msg = None
self.value = None
self.add_item(Select())
async def on_timeout(self) -> None:
for child in self.children:
child.disabled = True
await self.msg.edit(view=self, content="Timed Out.")
view = View(ctx)
view.msg = msg = await ctx.send(embed=embed, view=view)
await view.wait()
if not view.value:
return
await msg.delete()
tracks = view.value[1][3]
else:
tracks = (
search.tracks[0]
if search.search_type is slate.SearchType.TRACK
else search.tracks
)
self.queue.put(tracks, position=0 if (now or next) else None)
if now:
await self.stop()
if search.search_type is slate.SearchType.TRACK or isinstance(
search.result, list
):
description = f"Added the {search.source.value.lower()} track [{search.tracks[0].title}]({search.tracks[0].uri}) to the queue."
else:
description = f"Added the {search.source.value.lower()} {search.type.name.lower()} [{search.result.name}]({search.result.uri}) to the queue."
await ctx.reply(
embed=discord.Embed(color=self.bot.color, description=description)
)
async def loop(self) -> None:
while True:
self._queue_add_event.clear()
self._track_end_event.clear()
if self.queue.is_empty():
try:
with async_timeout.timeout(timeout=3600):
await self._queue_add_event.wait()
except asyncio.TimeoutError:
await self.disconnect()
break
track = self.queue.get()
if track.source is slate.Source.SPOTIFY:
try:
search = await self.search(
f"{track.author} - {track.title}",
source=slate.Source.YOUTUBE,
ctx=track.ctx,
)
except Exception as error:
await self.send(embed=error.embed)
continue
track = search.tracks[0]
await self.play(track)
await self._track_end_event.wait()
async def connect(
self,
*,
timeout: float | None = None,
reconnect: bool | None = None,
self_deaf: bool = True,
) -> None:
await super().connect(timeout=timeout, reconnect=reconnect, self_deaf=self_deaf)
self._task = asyncio.create_task(self.loop())
async def disconnect(self, *, force: bool = False) -> None:
await super().disconnect(force=force)
if self._task is not None and self._task.done() is False:
self._task.cancel()
async def handle_track_start(self) -> None:
self.message = await self.invoke_controller()
async def handle_track_over(self) -> None:
self.skip_request_ids = set()
self._current = None
self._track_end_event.set()
self._track_end_event.clear()
if not self.message:
return
try:
old = self.queue._queue_history[0]
except IndexError:
return
# await self.message.edit(embed=utils.embed(description=f"Finished playing **[{old.title}]({old.uri})** by **{old.author}**."))
async def handle_track_error(self) -> None:
await self.send(
embed=discord.Embed(
colour=self.bot.color,
description=f"Something went wrong while playing a track.",
)
)
await self.handle_track_over()
| 2.078125 | 2 |
users/admin.py | shubhankar5/Mitron-Achatting-app-in-django | 7 | 12790393 | <reponame>shubhankar5/Mitron-Achatting-app-in-django
from django.contrib import admin
from .models import Profile, Address, Friends, BlockedUsers
admin.site.register([Profile, Address, Friends, BlockedUsers]) | 1.4375 | 1 |
Core/LogicClass/MonteCarloClass/MonteCarloMove.py | Needoliprane/ThePhantomOfTheOpera | 0 | 12790394 | import random
from LogicClass.MonteCarloClass.ArborescenteTree import ArborescenteTree
class MonteCarloMove:
def __init__(self, isInspector, isPhantom, numberOfRoom):
self.tree = ArborescenteTree()
self.isPhantom = isPhantom
self.isInspector = isInspector
self.numberOfRoom = numberOfRoom
def wiseMovePhantom(self, player, roomList):
roomPossibilities = random.sample(range(0, self.numberOfRoom), self.numberOfRoom - (int(self.numberOfRoom / 2)))
for roomIndex in roomPossibilities:
if (roomList[roomIndex].isOn() == False and len(roomList[roomIndex].getPlayers()) > 1):
self.tree.addPossibilities(roomList[roomIndex].id, value=30)
elif (roomList[roomIndex].isOn() == False):
self.tree.addPossibilities(roomList[roomIndex].id, value=15)
elif (len(roomList[roomIndex].getPlayers()) == 0):
self.tree.addPossibilities(roomList[roomIndex].id, value=10)
else:
self.tree.addPossibilities(roomList[roomIndex].id)
roomId = self.tree.chooseLeafMove(self.tree.headCell)
self.tree.headCell.childCell = []
if (roomId != None):
player.playerMove(roomList[roomId])
return
player.playerMove(roomList[random.randint(0, self.numberOfRoom - 1)])
def wiseMoveInspector(self, player, roomList):
if (player.monteCarloInspector == None):
value = random.randint(0, self.numberOfRoom - 1)
player.playerMove(roomList[value])
return
roomPossibilities = random.sample(range(0, self.numberOfRoom), self.numberOfRoom - (int(self.numberOfRoom / 2)))
for roomIndex in roomPossibilities:
for playerInTheRoom in roomList[roomIndex].getPlayers():
value = player.monteCarloInspector.tree.checkPresenceInTheNodeMove(playerInTheRoom.id, player.monteCarloInspector.tree.headCell.childCell, value=0)
if (value[0] == True):
self.tree.addPossibilities(roomIndex, value[1])
else:
self.tree.addPossibilities(roomIndex, value=1)
roomId = self.tree.chooseLeafMove(self.tree.headCell)
self.tree.headCell.childCell = []
if (roomId != None):
roomList[roomId].switchOnTheLight()
player.playerMove(roomList[roomId])
return
value = random.randint(0, self.numberOfRoom - 1)
roomList[value].switchOnTheLight()
player.playerMove(roomList[value])
def wiseMoveCharacter(self, player, roomList):
roomPossibilities = random.sample(range(0, self.numberOfRoom), int(self.numberOfRoom / 2))
for roomIndex in roomPossibilities:
if (roomList[roomIndex].isRunningJob() == True):
self.tree.addPossibilities(roomList[roomIndex].id, value=10)
else:
self.tree.addPossibilities(roomList[roomIndex].id)
roomId = self.tree.chooseLeafMove(self.tree.headCell)
self.tree.headCell.childCell = []
if (roomId != None):
roomList[roomId].switchOnTheLight()
player.playerMove(roomList[roomId])
return
value = random.randint(0, self.numberOfRoom - 1)
roomList[value].switchOnTheLight()
player.playerMove(roomList[value])
def wiseMove(self, player, roomList, specialTurn):
if (self.isPhantom == True and specialTurn == True):
self.wiseMovePhantom(player, roomList)
return
if (self.isInspector == True and specialTurn == True):
self.wiseMoveInspector(player, roomList)
return
self.wiseMoveCharacter(player, roomList)
| 3.015625 | 3 |
Codes/Abaqus_Indentation/2D/scripts_2D/geom_tip_ball.py | materialsguy/Predict_Nanoindentation_Tip_Wear | 0 | 12790395 | <filename>Codes/Abaqus_Indentation/2D/scripts_2D/geom_tip_ball.py
s1 = mdb.models['Model-1'].ConstrainedSketch(name='__profile__', sheetSize=200.0)
g, v, d, c = s1.geometry, s1.vertices, s1.dimensions, s1.constraints
s1.sketchOptions.setValues(viewStyle=AXISYM)
s1.setPrimaryObject(option=STANDALONE)
s1.ConstructionLine(point1=(0.0, -100.0), point2=(0.0, 100.0))
s1.FixedConstraint(entity=g[2])
s1.ArcByCenterEnds(center=(0.0, tip_r), point1=(0.0, 0.0), point2=(tip_r, tip_r), direction=COUNTERCLOCKWISE)
s1.Line(point1=(tip_r, tip_r), point2=(0.0, tip_r))
s1.Line(point1=(0.0, tip_r), point2=(0.0, 0.0))
p = mdb.models['Model-1'].Part(name='Part-2', dimensionality=AXISYMMETRIC, type=DEFORMABLE_BODY)
p = mdb.models['Model-1'].parts['Part-2']
p.BaseShell(sketch=s1)
s1.unsetPrimaryObject()
p = mdb.models['Model-1'].parts['Part-2']
session.viewports['Viewport: 1'].setValues(displayedObject=p)
del mdb.models['Model-1'].sketches['__profile__']
p = mdb.models['Model-1'].parts['Part-2']
s = p.edges
side1Edges = s.getSequenceFromMask(mask=('[#2 ]', ), )
p.Surface(side1Edges=side1Edges, name='Surf-tip')
p = mdb.models['Model-1'].parts['Part-2']
e = p.edges
edges = s.getSequenceFromMask(mask=('[#4 ]', ), )
p.Set(edges=edges, name='Set-tip') | 1.898438 | 2 |
external/colors_script.py | corey1218/ZetaSploit | 1 | 12790396 | <filename>external/colors_script.py
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
class colors_script:
def __init__(self):
self.script_extension = "colors"
self.commands = {
'%black': '\033[30m',
'%red': '\033[31m',
'%green': '\033[32m',
'%yellow': '\033[33m',
'%blue': '\033[34m',
'%purple': '\033[35m',
'%cyan': '\033[36m',
'%white': '\033[77m',
'%end': '\033[0m',
'%bold': '\033[1m',
'%dark': '\033[2m',
'%bent': '\033[3m',
'%line': '\033[4m',
'%twink': '\033[5m',
'%back': '\033[7m',
'%newline': '\n'
}
def _read_file_lines(self, path):
lines = list()
with open(path) as file:
for line in file:
if line and line[0:8] != "%comment" and not line.isspace():
lines.append(line)
return lines
def _reverse_read_lines(self, path):
lines = list()
with open(path) as file:
for line in reversed(list(file)):
lines.append(line)
return lines
def _reversed_find_last_commands(self, lines):
buffer_commands = list()
for line in lines:
buffer_line = line
for command in self.commands.keys():
if command in buffer_line:
buffer_line = buffer_line.replace(command, " ")
if buffer_line.isspace():
buffer_commands.append(line.strip())
else:
break
buffer_commands.reverse()
return buffer_commands
def _remove_empty_lines(self, lines):
line_id = -1
for _ in range(len(lines)):
buffer_line = lines[line_id]
for command in self.commands.keys():
if command in buffer_line:
buffer_line = buffer_line.replace(command, " ")
if buffer_line.isspace():
lines.pop(line_id)
return lines
def parse_colors_script(self, path):
result = ""
lines = self._read_file_lines(path)
reversed_lines = self._reverse_read_lines(path)
last_commands = self._reversed_find_last_commands(reversed_lines)
last_commands = "".join(map(str, last_commands))
lines = self._remove_empty_lines(lines)
lines[-1] = lines[-1].strip('\n') + last_commands
if path.endswith(self.script_extension):
try:
buffer_commands = ""
for line in lines:
buffer_line = line
for command in self.commands.keys():
if command in buffer_line:
buffer_line = buffer_line.replace(command, " ")
if buffer_line.isspace():
buffer_commands += line.strip()
else:
line = buffer_commands + line
buffer_commands = ""
for command in self.commands.keys():
line = line.partition('%comment')[0]
line = line.replace(command, self.commands[command])
result += line
return result
except Exception:
return None
else:
return None
def compile_colors_script(self, path, outfile='a.out'):
result = self.parse_colors_script(path)
if result:
output = open(outfile, 'wb')
output.write(result.encode())
output.close()
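# Hedged usage sketch (not part of the original module): driving the parser directly.
# The script path below is a placeholder.
if __name__ == "__main__":
    cs = colors_script()
    rendered = cs.parse_colors_script("example.colors")  # None if not a valid .colors script
    if rendered is not None:
        print(rendered)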
| 2.296875 | 2 |
dicom_wsi/mods/mapping.py | m081429/dicom_wsi | 0 | 12790397 | <reponame>m081429/dicom_wsi
from mods import utils
# =====================================================================================
# Use this piece of code to automatically parse information from different slide types
# =====================================================================================
def map_aperio_features(cfg, wsi):
"""
Update attributes by mapping from vendor specific attributes to DICOM attributes
:param cfg:
:param wsi:
:return:
"""
cfg['OnTheFly'] = dict()
if not cfg.get('BaseAttributes').get('Manufacturer'):
cfg['BaseAttributes']['Manufacturer'] = wsi.get('openslide.vendor')
if not cfg.get('BaseAttributes').get('SeriesDescription'):
cfg['BaseAttributes']['SeriesDescription'] = str(wsi.get('aperio.ImageID'))
if not cfg.get('BaseAttributes').get('ContentTime'):
_, cfg = utils.make_time('ContentTime', wsi.get('aperio.Time'), cfg,
dict_element='SharedFunctionalGroupsSequence')
if not cfg.get('BaseAttributes').get('SeriesTime'):
_, cfg = utils.make_time('SeriesTime', wsi.get('aperio.Time'), cfg,
dict_element='SharedFunctionalGroupsSequence')
if not cfg.get('BaseAttributes').get('StudyTime'):
_, cfg = utils.make_time('StudyTime', wsi.get('aperio.Time'), cfg,
dict_element='SharedFunctionalGroupsSequence')
pv = wsi.get('openslide.mpp-x'), wsi.get('openslide.mpp-y')
cfg['OnTheFly']['PixelSpacing'] = [float(x) for x in pv]
return cfg
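# Hedged usage sketch (illustrative, not from the original project): `wsi` is expected to be a
# mapping that answers .get() for OpenSlide/Aperio property keys (for example the .properties
# of an openslide.OpenSlide object), and `cfg` the configuration dict used elsewhere in
# dicom_wsi. The path below is a placeholder, so the sketch is left as comments only.
#
#   import openslide
#   slide = openslide.OpenSlide("example.svs")
#   cfg = map_aperio_features(cfg, slide.properties)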
| 2.21875 | 2 |
make.py | skandupmanyu/facet | 37 | 12790398 | <filename>make.py<gh_stars>10-100
#!/usr/bin/env python3
"""
call the Python make file for the common conda build process residing in 'pytools'
"""
import os
import sys
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
PYTOOLS_DIR = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir, "pytools"))
sys.path.insert(0, PYTOOLS_DIR)
# noinspection PyUnresolvedReferences
from make import run_make
run_make()
| 2.21875 | 2 |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/pavelib/paver_tests/test_database.py | osoco/better-ways-of-thinking-about-software | 3 | 12790399 | """
Tests for the Paver commands for updating test databases and its utility methods
"""
import os
import shutil
import tarfile
from tempfile import mkdtemp
from unittest import TestCase
from unittest.mock import call, patch, Mock
import boto
from pavelib import database
from pavelib.utils import db_utils
from pavelib.utils.db_utils import extract_files_from_zip
from pavelib.utils.envs import Env
from .utils import PaverTestCase
class TestPaverDbUtils(TestCase):
""" Tests for paver bokchoy database utils """
@patch('pavelib.utils.db_utils.verify_files_exist')
def test_extract_files_from_zip(self, _mock_verify):
test_dir = mkdtemp()
output_dir = mkdtemp()
self.addCleanup(shutil.rmtree, test_dir)
self.addCleanup(shutil.rmtree, output_dir)
tmp_file_name = os.path.join(test_dir, 'test.txt')
with open(tmp_file_name, 'w') as tmp_file:
tmp_file.write('Test file content')
tmp_tarfile = os.path.join(test_dir, 'test.tar.gz')
with tarfile.open(name=tmp_tarfile, mode='w:gz') as tar_file:
tar_file.add(tmp_file_name, arcname='test.txt')
extract_files_from_zip(['test.txt'], tmp_tarfile, output_dir)
extracted_file = os.path.join(output_dir, 'test.txt')
assert os.path.isfile(extracted_file)
with open(extracted_file) as test_file:
data = test_file.read()
assert data == 'Test file content'
def _write_temporary_db_cache_files(path, files):
"""
create some temporary files to act as the local db cache files so that
we can compute a fingerprint
"""
for index, filename in enumerate(files):
filepath = os.path.join(path, filename)
with open(filepath, 'w') as cache_file:
cache_file.write(str(index))
class TestPaverDatabaseTasks(PaverTestCase):
"""
Tests for the high level database tasks
"""
def setUp(self):
super().setUp()
# This value is the actual sha1 fingerprint calculated for the dummy
# files used in these tests
self.expected_fingerprint = '<PASSWORD>'
self.fingerprint_filename = f'{self.expected_fingerprint}.tar.gz'
self.bucket = Mock(name='test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_load_data_from_local_cache(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is the same as
the stored fingerprint, verify that we make a call to load data into
the database without running migrations
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOLDER)
self.addCleanup(os.remove, db_utils.FINGERPRINT_FILEPATH)
_write_temporary_db_cache_files(db_utils.CACHE_FOLDER, database.ALL_DB_FILES)
# write the local fingerprint file with the same value than the
# computed fingerprint
with open(db_utils.FINGERPRINT_FILEPATH, 'w') as fingerprint_file:
fingerprint_file.write(self.expected_fingerprint)
with patch.object(db_utils, 'get_file_from_s3', wraps=db_utils.get_file_from_s3) as _mock_get_file:
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
# Make sure that the local cache files are used - NOT downloaded from s3
assert not _mock_get_file.called
calls = [
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --calculate_migrations'),
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --use-existing-db')
]
_mock_sh.assert_has_calls(calls)
@patch.object(database, 'CACHE_BUCKET_NAME', 'test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_load_data_from_s3_fingerprint(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is different
than the stored fingerprint AND there is a matching fingerprint file
in s3, verify that we make a call to load data into the database
without running migrations
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOLDER)
self.addCleanup(os.remove, db_utils.FINGERPRINT_FILEPATH)
_write_temporary_db_cache_files(db_utils.CACHE_FOLDER, database.ALL_DB_FILES)
# zip the temporary files and push them to s3 bucket
zipfile_path = os.path.join(db_utils.CACHE_FOLDER, self.fingerprint_filename)
with tarfile.open(name=zipfile_path, mode='w:gz') as tar_file:
for name in database.ALL_DB_FILES:
tar_file.add(os.path.join(db_utils.CACHE_FOLDER, name), arcname=name)
key = boto.s3.key.Key(bucket=self.bucket, name=self.fingerprint_filename)
key.set_contents_from_filename(zipfile_path, replace=False)
# write the local fingerprint file with a different value than
# the computed fingerprint
local_fingerprint = '1<PASSWORD>'
with open(db_utils.FINGERPRINT_FILEPATH, 'w') as fingerprint_file:
fingerprint_file.write(local_fingerprint)
with patch('boto.connect_s3', Mock(return_value=Mock())):
with patch.object(db_utils, 'get_file_from_s3') as _mock_get_file:
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
# Make sure that the fingerprint file is downloaded from s3
_mock_get_file.assert_called_once_with(
'test_bucket', self.fingerprint_filename, db_utils.CACHE_FOLDER
)
calls = [
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --calculate_migrations'),
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --use-existing-db')
]
_mock_sh.assert_has_calls(calls)
@patch.object(database, 'CACHE_BUCKET_NAME', 'test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_load_data_and_run_migrations(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is different
than the stored fingerprint AND there is NO matching fingerprint file
in s3, verify that we make a call to load data into the database, run
migrations and update the local db cache files
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOLDER)
self.addCleanup(os.remove, db_utils.FINGERPRINT_FILEPATH)
_write_temporary_db_cache_files(db_utils.CACHE_FOLDER, database.ALL_DB_FILES)
# write the local fingerprint file with a different value than
# the computed fingerprint
local_fingerprint = '12<PASSWORD>6<PASSWORD>'
with open(db_utils.FINGERPRINT_FILEPATH, 'w') as fingerprint_file:
fingerprint_file.write(local_fingerprint)
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
calls = [
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --calculate_migrations'),
call(f'{Env.REPO_ROOT}/scripts/reset-test-db.sh --rebuild_cache --use-existing-db')
]
_mock_sh.assert_has_calls(calls)
@patch.object(database, 'CACHE_BUCKET_NAME', 'test_bucket')
@patch.object(db_utils, 'CACHE_FOLDER', mkdtemp())
@patch.object(db_utils, 'FINGERPRINT_FILEPATH', os.path.join(mkdtemp(), 'fingerprint'))
@patch.object(db_utils, 'sh')
def test_updated_db_cache_pushed_to_s3(self, _mock_sh):
"""
Assuming that the computed db cache file fingerprint is different
than the stored fingerprint AND there is NO matching fingerprint file
in s3, verify that an updated fingeprint file is pushed to s3
"""
self.addCleanup(shutil.rmtree, db_utils.CACHE_FOLDER)
self.addCleanup(os.remove, db_utils.FINGERPRINT_FILEPATH)
_write_temporary_db_cache_files(db_utils.CACHE_FOLDER, database.ALL_DB_FILES)
# write the local fingerprint file with a different value than
# the computed fingerprint
local_fingerprint = '<PASSWORD>'
with open(db_utils.FINGERPRINT_FILEPATH, 'w') as fingerprint_file:
fingerprint_file.write(local_fingerprint)
database.update_local_bokchoy_db_from_s3() # pylint: disable=no-value-for-parameter
assert self.bucket.get_key(self.fingerprint_filename)
| 2.65625 | 3 |
nygame/font_cache.py | nfearnley/nygame | 1 | 12790400 | from functools import lru_cache
from typing import Optional
from pygame.freetype import get_default_font, SysFont
font_cache = {}
@lru_cache(100)
def get_font(fontname: Optional[str] = None, size: int = 12, bold: bool = False, italic: bool = False):
if fontname is None:
fontname = get_default_font()
return SysFont(fontname, size, bold=bold, italic=italic)
| 2.6875 | 3 |
ML/Regressions/DecisionTree/DecisionTree.py | acanozturk/ml-dl | 0 | 12790401 | <gh_stars>0
# Pricing based on seat tier
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
df = pd.read_csv("original.csv", sep =";", header = None) # Dataseti okuduk ve değerlerin ; ile ayrıldığını belirttik
x = df.iloc[:,0].values.reshape(-1,1) # iloc, dataframe içniden 0. indexteki değerleri alır
y = df.iloc[:,1].values.reshape(-1,1)
# Decision tree regression
tree_reg = DecisionTreeRegressor()
tree_reg.fit(x,y)
a = np.arange(min(x), max(x), 0.01).reshape(-1,1) # To predict across the whole range; the price must stay constant within each tier interval
y_head = tree_reg.predict(a)
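# Added illustrative query (the input value is arbitrary): a fitted decision tree returns the
# constant price learned for whichever seat-tier interval the input falls into.
print(tree_reg.predict([[5.5]]))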
# Visualization
plt.scatter(x, y, color = 'red')
plt.plot(a, y_head, color = 'green')
plt.xlabel("Seat Tier")
plt.ylabel("Price")
plt.show() | 3.296875 | 3 |
whatsgoingon/api/eventful.py | warisp/whatsgoingon | 0 | 12790402 | import requests
def get_event(user_key, latitude, longitude):
url = "http://api.eventful.com/json/events/search?"
url += "&app_key=" + user_key
url += "&date=Future" #+ date
url += "&page_size=100"
url += "&sort_order=popularity"
url += "&sort_direction=descending"
url += "&q=music"
url += "&c=music"
url += "&where=" + latitude + "," + longitude + "&within=10&km"
data = requests.get(url).json()
if int(data["total_items"]) > 0:
return data["events"]["event"]
else:
return "404"
| 3.09375 | 3 |
setup.py | erstrom/hexfilter | 0 | 12790403 | <reponame>erstrom/hexfilter
#!/usr/bin/env python
from setuptools import setup
readme = open("README.rst").read()
setup(name="hexfilter",
version="0.2",
description="A library/tool for extracting hex dumps from log files",
url="https://github.com/erstrom/hexfilter",
author="<NAME>",
author_email="<EMAIL>",
license="MIT",
long_description=readme + "\n\n",
entry_points={
"console_scripts": ["hexfilter=hexfilter.__main__:main"]
},
packages=["hexfilter"],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"License :: OSI Approved :: MIT License",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Topic :: Software Development"
]
)
| 1.554688 | 2 |
packages/w3af/w3af/core/data/dc/utils/multipart.py | ZooAtmosphereGroup/HelloPackages | 3 | 12790404 | <filename>packages/w3af/w3af/core/data/dc/utils/multipart.py<gh_stars>1-10
"""
multipart.py
Copyright 2014 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import os
import mimetypes
from w3af.core.controllers.misc.io import is_file_like
from w3af.core.data.misc.encoding import smart_str
from w3af.core.data.constants.encodings import DEFAULT_ENCODING
def encode_as_multipart(multipart_container, boundary):
"""
Encode the DataContainer using multipart/post , given the provided boundary
:param multipart_container: The container to encode
:param boundary: Using this boundary (a random string)
:return: The post-data that should be sent
"""
v_vars, v_files = _split_vars_files(multipart_container)
_, data = multipart_encode(v_vars, v_files, boundary=boundary)
return data
def _split_vars_files(data):
"""
Based on the request it decides if we should send the request as
multipart or not.
:return: (List with string variables,
List with file variables)
"""
v_vars = []
v_files = []
for token in data.iter_tokens():
pname = token.get_name()
value = token.get_value()
enc_pname = smart_str(pname, encoding=DEFAULT_ENCODING, errors='ignore')
if is_file_like(value):
if not value.closed:
v_files.append((enc_pname, value))
else:
v_vars.append((enc_pname, ''))
elif hasattr(value, 'isFile'):
v_files.append((enc_pname, value))
else:
# Ensuring we actually send a string
value = smart_str(value, encoding=DEFAULT_ENCODING, errors='ignore')
v_vars.append((enc_pname, value))
return v_vars, v_files
def get_boundary():
"""
Before I used:
boundary = mimetools.choose_boundary()
But that returned some "private" information:
'127.0.0.1.1000.6267.1173556103.828.1'
Now I simply return a fixed string which I generated once and now re-use
all the time.
There is a reason for having a fixed boundary! When comparing two fuzzable
requests it's easier to do it if the boundaries are static. This allows
get_request_hash() to work as expected.
The problem with fixed boundaries is that they might be used to fingerprint
w3af, or that they might appear in the data we send to the wire and break
the request.
:return:
"""
return 'b08c02-53d780-e2bc43-1d5278-a3c0d9-a5c0d9'
def multipart_encode(_vars, files, boundary=None, _buffer=None):
if boundary is None:
boundary = get_boundary()
if _buffer is None:
_buffer = ''
for key, value in _vars:
_buffer += '--%s\r\n' % boundary
_buffer += 'Content-Disposition: form-data; name="%s"' % key
_buffer += '\r\n\r\n' + value + '\r\n'
for key, fd in files:
fd.seek(0)
filename = fd.name.split(os.path.sep)[-1]
guessed_mime = mimetypes.guess_type(filename)[0]
content_type = guessed_mime or 'application/octet-stream'
args = (smart_str(key, errors='ignore'), smart_str(filename, errors='ignore'))
_buffer += '--%s\r\n' % boundary
_buffer += 'Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % args
_buffer += 'Content-Type: %s\r\n' % content_type
_buffer += '\r\n%s\r\n' % fd.read()
_buffer += '--%s--\r\n\r\n' % boundary
return boundary, _buffer
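# Minimal usage sketch (no file parts; the field name and value are illustrative):
#   boundary, body = multipart_encode([('comment', 'hello')], [])
#   # ``body`` now holds one form-data part delimited by ``boundary``, ready to be
#   # sent with a ``multipart/form-data; boundary=<boundary>`` Content-Type header.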
| 2.40625 | 2 |
wikiquote/langs/fr.py | gcqmkm02/wikiquote | 55 | 12790405 | from typing import List, Text, Tuple
import logging
import re
import lxml
from .. import utils
MAIN_PAGE = "Wikiquote:Accueil"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
def extract_quotes(tree: lxml.html.HtmlElement, max_quotes: int) -> List[Text]:
# French wiki uses a "citation" HTML class
nodes = tree.xpath('//div[@class="citation"]')
quotes = [utils.clean_txt(node.text_content()) for node in nodes]
return quotes[:max_quotes]
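# Usage sketch: extract_quotes works on any parsed lxml tree that uses the same
# "citation" class, e.g.
#   import lxml.html
#   tree = lxml.html.fromstring('<div class="citation">La vie est belle.</div>')
#   extract_quotes(tree, 5)  # -> a list with one cleaned quote string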
def qotd_old_method(html_tree: lxml.html.HtmlElement) -> Tuple[Text, Text]:
tree = html_tree.get_element_by_id("mf-cdj")
tree = tree.xpath("div/div")[1].xpath("table/tbody/tr/td")[1]
quote = tree.xpath("div/i")[0].text_content()
author = tree.xpath("div/a")[0].text_content()
return quote, author
def qotd_new_method(html_tree: lxml.html.HtmlElement) -> Tuple[Text, Text]:
tree = html_tree.get_element_by_id("mf-cdj")
lines = [
line.strip().replace(u"\xa0", " ") for line in tree.text_content().splitlines()
]
for line in lines:
matches = re.search(r"«(.+?)»(.+)", line)
if not matches:
continue
quote = matches.group(1).strip()
author = matches.group(2).strip("-—– \n")
return quote, author
raise Exception("Could not parse quote of the day from page contents.")
def qotd(html_tree: lxml.html.HtmlElement) -> Tuple[Text, Text]:
try:
return qotd_new_method(html_tree)
except Exception as e:
logger.warning("Could not extract French QOTD using new method due to: %s", e)
return qotd_old_method(html_tree)
| 3.21875 | 3 |
release/stubs/Autodesk/AutoCAD/Internal/DatabaseServices.py | paoloemilioserra/ironpython-stubs | 0 | 12790406 | <reponame>paoloemilioserra/ironpython-stubs<gh_stars>0
# encoding: utf-8
# module Autodesk.AutoCAD.Internal.DatabaseServices calls itself DatabaseServices
# from Acmgd, Version=24.0.0.0, Culture=neutral, PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class EvalExpr(DBObject):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class EvalConnectable(EvalExpr):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class BlockElement(EvalConnectable):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
Name = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Name(self: BlockElement) -> str
Set: Name(self: BlockElement) = value
"""
class BlockParameter(BlockElement):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
def GetPropertyConnectionName(self, propName):
""" GetPropertyConnectionName(self: BlockParameter, propName: str) -> str """
pass
def GetPropertyValue(self, name, xform=None):
"""
GetPropertyValue(self: BlockParameter, name: str) -> object
GetPropertyValue(self: BlockParameter, name: str, xform: Matrix3d) -> (object, Matrix3d)
"""
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
PropertyDescription = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PropertyDescription(self: BlockParameter) -> BlockParameterPropertyDescriptorCollection
"""
class Block1PointParameter(BlockParameter):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class Block2PointParameter(BlockParameter):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class BlockAction(BlockElement):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class BlockElementEntity(Entity):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
Element = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Element(self: BlockElementEntity) -> ObjectId
"""
class BlockActionEntity(BlockElementEntity):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class BlockFlipParameter(Block2PointParameter):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
BaseStateLabel = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: BaseStateLabel(self: BlockFlipParameter) -> str
"""
FlippedStateLabel = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: FlippedStateLabel(self: BlockFlipParameter) -> str
"""
FlipState = None
class BlockLookupAction(BlockAction):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod
def DuplicateCellsInLookupColumn(aryTable, descArray, colIdx, outArray):
""" DuplicateCellsInLookupColumn(aryTable: Array, descArray: LookupColumnDescriptorCollection, colIdx: int) -> (bool, Array) """
pass
@staticmethod
def DuplicateRowsOverInputColumns(aryTable, descArray, outArray):
""" DuplicateRowsOverInputColumns(aryTable: Array, descArray: LookupColumnDescriptorCollection) -> (bool, Array) """
pass
def GetLookupTable(self, pDataTable, descArray):
""" GetLookupTable(self: BlockLookupAction) -> (Array, LookupColumnDescriptorCollection) """
pass
@staticmethod
def NonSingletonRangeInInputColumns(aryTable, descArray, outArray):
""" NonSingletonRangeInInputColumns(aryTable: Array, descArray: LookupColumnDescriptorCollection) -> (bool, Array) """
pass
@staticmethod
def NullsInInputColumns(aryTable, descArray, outArray):
""" NullsInInputColumns(aryTable: Array, descArray: LookupColumnDescriptorCollection) -> (bool, Array) """
pass
def SetLookupTable(self, aryTable, descArray):
""" SetLookupTable(self: BlockLookupAction, aryTable: Array, descArray: LookupColumnDescriptorCollection) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
NumberOfInputColumns = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: NumberOfInputColumns(self: BlockLookupAction) -> int
"""
NumberOfOutputColumns = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: NumberOfOutputColumns(self: BlockLookupAction) -> int
"""
NumberOfRows = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: NumberOfRows(self: BlockLookupAction) -> int
"""
class BlockLookupActionEntity(BlockActionEntity):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class BlockLookupParameter(Block1PointParameter):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class BlockParameterPropertyDescriptor(object):
# no doc
ConnectionName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: ConnectionName(self: BlockParameterPropertyDescriptor) -> str
"""
HasValueSet = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: HasValueSet(self: BlockParameterPropertyDescriptor) -> bool
"""
PropertyDescription = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PropertyDescription(self: BlockParameterPropertyDescriptor) -> str
"""
PropertyName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PropertyName(self: BlockParameterPropertyDescriptor) -> str
"""
PropertyType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PropertyType(self: BlockParameterPropertyDescriptor) -> Int16
"""
ReadOnly = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: ReadOnly(self: BlockParameterPropertyDescriptor) -> bool
"""
UnitsType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: UnitsType(self: BlockParameterPropertyDescriptor) -> UnitsType
"""
ValueSetValues = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: ValueSetValues(self: BlockParameterPropertyDescriptor) -> Array
"""
Visible = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Visible(self: BlockParameterPropertyDescriptor) -> bool
"""
class BlockParameterPropertyDescriptorCollection(DisposableWrapper):
""" BlockParameterPropertyDescriptorCollection() """
def CopyTo(self, array, size):
""" CopyTo(self: BlockParameterPropertyDescriptorCollection, array: Array[BlockParameterPropertyDescriptor], size: int) """
pass
def Dispose(self):
""" Dispose(self: DisposableWrapper, A_0: bool) """
pass
def GetEnumerator(self):
""" GetEnumerator(self: BlockParameterPropertyDescriptorCollection) -> IEnumerator """
pass
def ICollectionCopyTo(self, array, size):
""" ICollectionCopyTo(self: BlockParameterPropertyDescriptorCollection, array: Array, size: int) """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self, *args): #cannot find CLR method
""" x.__len__() <==> len(x) """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Count(self: BlockParameterPropertyDescriptorCollection) -> int
"""
IsSynchronized = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsSynchronized(self: BlockParameterPropertyDescriptorCollection) -> bool
"""
SyncRoot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: SyncRoot(self: BlockParameterPropertyDescriptorCollection) -> object
"""
class BlockUserParameter(Block1PointParameter):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
UserParamType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: UserParamType(self: BlockUserParameter) -> UserParameterType
"""
class EvalGraph(DBObject):
# no doc
def Dispose(self):
""" Dispose(self: DBObject, A_0: bool) """
pass
def GetAllNodes(self):
""" GetAllNodes(self: EvalGraph) -> Array[int] """
pass
def GetNode(self, nodeId, mode, pTrans):
""" GetNode(self: EvalGraph, nodeId: UInt32, mode: OpenMode, pTrans: Transaction) -> DBObject """
pass
@staticmethod # known case of __new__
def __new__(self, *args): #cannot find CLR constructor
""" __new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool) """
pass
class LookupColumnDescriptor(RXObject):
""" LookupColumnDescriptor() """
def Dispose(self):
""" Dispose(self: DisposableWrapper, A_0: bool) """
pass
@staticmethod # known case of __new__
def __new__(self):
"""
__new__(cls: type)
__new__(cls: type, unmanagedObjPtr: IntPtr, autoDelete: bool)
"""
pass
ConnectableId = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: ConnectableId(self: LookupColumnDescriptor) -> UInt32
Set: ConnectableId(self: LookupColumnDescriptor) = value
"""
ConnectionName = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: ConnectionName(self: LookupColumnDescriptor) -> str
Set: ConnectionName(self: LookupColumnDescriptor) = value
"""
IsInvertible = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsInvertible(self: LookupColumnDescriptor) -> bool
Set: IsInvertible(self: LookupColumnDescriptor) = value
"""
IsOutputColumn = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsOutputColumn(self: LookupColumnDescriptor) -> bool
Set: IsOutputColumn(self: LookupColumnDescriptor) = value
"""
PropertyType = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PropertyType(self: LookupColumnDescriptor) -> Int16
Set: PropertyType(self: LookupColumnDescriptor) = value
"""
PropertyUnits = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: PropertyUnits(self: LookupColumnDescriptor) -> UnitsType
Set: PropertyUnits(self: LookupColumnDescriptor) = value
"""
UnmatchedValue = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: UnmatchedValue(self: LookupColumnDescriptor) -> str
Set: UnmatchedValue(self: LookupColumnDescriptor) = value
"""
class LookupColumnDescriptorCollection(DisposableWrapper):
""" LookupColumnDescriptorCollection() """
def Add(self, value):
""" Add(self: LookupColumnDescriptorCollection, value: LookupColumnDescriptor) -> int """
pass
def Clear(self):
""" Clear(self: LookupColumnDescriptorCollection) """
pass
def CopyTo(self, array, size):
""" CopyTo(self: LookupColumnDescriptorCollection, array: Array[LookupColumnDescriptor], size: int) """
pass
def Dispose(self):
""" Dispose(self: DisposableWrapper, A_0: bool) """
pass
def GetEnumerator(self):
""" GetEnumerator(self: LookupColumnDescriptorCollection) -> IEnumerator """
pass
def ICollectionCopyTo(self, array, size):
""" ICollectionCopyTo(self: LookupColumnDescriptorCollection, array: Array, size: int) """
pass
def Insert(self, index, value):
""" Insert(self: LookupColumnDescriptorCollection, index: int, value: LookupColumnDescriptor) """
pass
def Remove(self, value):
""" Remove(self: LookupColumnDescriptorCollection, value: LookupColumnDescriptor) """
pass
def RemoveAt(self, index):
""" RemoveAt(self: LookupColumnDescriptorCollection, index: int) """
pass
def __add__(self, *args): #cannot find CLR method
""" x.__add__(y) <==> x+y """
pass
def __getitem__(self, *args): #cannot find CLR method
""" x.__getitem__(y) <==> x[y] """
pass
def __iter__(self, *args): #cannot find CLR method
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self, *args): #cannot find CLR method
""" x.__len__() <==> len(x) """
pass
def __setitem__(self, *args): #cannot find CLR method
""" x.__setitem__(i, y) <==> x[i]= """
pass
Count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: Count(self: LookupColumnDescriptorCollection) -> int
"""
IsSynchronized = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: IsSynchronized(self: LookupColumnDescriptorCollection) -> bool
"""
SyncRoot = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""Get: SyncRoot(self: LookupColumnDescriptorCollection) -> object
"""
class UnitsType(Enum):
""" enum UnitsType, values: Angular (1), Area (3), Distance (2), NoUnits (0) """
Angular = None
Area = None
Distance = None
NoUnits = None
value__ = None
class UserParameterType(Enum):
""" enum UserParameterType, values: Angle (4), Area (1), Distance (0), Scalar (3), String (5), Volume (2) """
Angle = None
Area = None
Distance = None
Scalar = None
String = None
value__ = None
Volume = None
| 1.921875 | 2 |
src/send_sms/send_sms.py | nvk1196/flashshipper | 0 | 12790407 | <reponame>nvk1196/flashshipper
# Download the helper library from https://www.twilio.com/docs/python/install
# Your Account Sid and Auth Token from twilio.com/console
# DANGER! This is insecure. See http://twil.io/secure
# my_msg = "This is my message with stuff and things"
# send_to_phone_number = "+15184282729"
# client = Client(account_sid, auth_token)
# message = client.messages \
# .create(
# body = my_msg,
# from_ = my_phone_number,
# to = send_to_phone_number
# )
# print(message.sid)
# print("SMS is sent!")
# Run this is terminal for window
# set TWILIO_ACCOUNT_SID=ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# set TWILIO_AUTH_TOKEN=your_auth_token
# import os
# account_sid = os.environ['TWILIO_ACCOUNT_SID']
# auth_token = os.environ['TWILIO_AUTH_TOKEN']
# https://github.com/twilio/twilio-python/issues/409
# https://stackoverflow.com/questions/43597379/how-to-provide-proxy-information-to-twilio-api-with-python/43608637#43608637
# https://help.pythonanywhere.com/pages/TwilioBehindTheProxy/
# #watch and learn
# https://www.youtube.com/watch?v=x4E4mbobGEc
account_sid = "AC9f05e812b5954718ee0e25f764ec5c6d"
auth_token = "9f59fde1d33f14857fbf5615be881388"
my_phone_number = "+15012632545" #Twilio gave me this phone number
#--LOCALHOST--
from twilio.rest import Client
def send_text (my_msg, send_to_phone_number):
client = Client(account_sid, auth_token)
message = client.messages \
.create(
body = my_msg,
from_ = my_phone_number,
to = send_to_phone_number
)
print("SMS is sent!")
#--PRODUCTION--
# import os
# from twilio.rest import Client
# from twilio.http.http_client import TwilioHttpClient
# def send_text (my_msg, send_to_phone_number):
# proxy_client = TwilioHttpClient()
# proxy_client.session.proxies = {'https': os.environ['https_proxy']}
# client = Client(account_sid, auth_token, http_client=proxy_client)
# # twilio api calls will now work from behind the proxy:
# message = client.messages.create(to = send_to_phone_number , from_ = my_phone_number, body = my_msg)
# print("SMS is sent!")
# print(message.sid)
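# Example call (sketch; the destination number is a placeholder):
#   send_text("Your FlashShipper order has shipped!", "+15551234567")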
| 2.375 | 2 |
tests/test_legendary_item.py | bonetou/GildedRose-Refactoring-Kata | 0 | 12790408 | <reponame>bonetou/GildedRose-Refactoring-Kata<filename>tests/test_legendary_item.py
import pytest
from src.items.legendary import LegendaryItem
from src.items.exceptions.invalid_quality_value import InvalidQualityValue
@pytest.fixture
def sulfuras():
return LegendaryItem('Sulfuras', 10, 80)
def test_should_not_change_sell_in_when_updated(sulfuras):
sulfuras.update_quality()
assert sulfuras.get_sell_in() == 10
def test_should_not_change_quality_value_when_updated(sulfuras):
sulfuras.update_quality()
assert sulfuras.get_quality() == 80
def test_should_create_legendary_item_when_quality_value_is_greater_than_normal_items_maximum_value():
LegendaryItem('Super Legendary Item', 10, 100)
def test_should_not_create_legendary_item_when_quality_value_is_less_than_minimum_value():
with pytest.raises(InvalidQualityValue):
LegendaryItem('Invalid Legendary Item', 2, -1) | 2.375 | 2 |
backend/src/baserow/contrib/database/migrations/0061_change_decimal_places.py | ashishdhngr/baserow | 0 | 12790409 | # Generated by Django 3.2.6 on 2022-02-14 13:25
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("database", "0060_set_ordering_on_tablewebhook_models"),
]
operations = [
migrations.AlterField(
model_name="numberfield",
name="number_decimal_places",
field=models.IntegerField(
choices=[
(0, "1"),
(1, "1.0"),
(2, "1.00"),
(3, "1.000"),
(4, "1.0000"),
(5, "1.00000"),
],
default=0,
help_text="The amount of digits allowed after the point.",
),
),
]
| 1.734375 | 2 |
tests/integration/test_networks.py | unparalleled-js/ape | 210 | 12790410 | <filename>tests/integration/test_networks.py
import pytest
from eth_typing import HexStr
@pytest.mark.parametrize("block_id", ("latest", 0, "0", "0x0", HexStr("0x0")))
def test_get_block(eth_tester_provider, block_id):
latest_block = eth_tester_provider.get_block(block_id)
# Each parameter is the same as requesting the first block.
assert latest_block.number == 0
assert latest_block.gas_data.base_fee == 1000000000
assert latest_block.gas_data.gas_used == 0
| 1.96875 | 2 |
job/sample_postgres_aws_sqs_job.py | Wonong/ab-metadata-publisher | 1 | 12790411 | <reponame>Wonong/ab-metadata-publisher<filename>job/sample_postgres_aws_sqs_job.py
import textwrap
import logging
import logging.config
import os
from pyhocon import ConfigFactory
from databuilder.extractor.postgres_metadata_extractor import PostgresMetadataExtractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.loader.file_system_neo4j_csv_loader import FsNeo4jCSVLoader
from databuilder.job.job import DefaultJob
from databuilder.task.task import DefaultTask
from publisher import aws_sqs_csv_puiblisher
from publisher.aws_sqs_csv_puiblisher import AWSSQSCsvPublisher
logging_config_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), '../config/logging_config.ini')
logging.config.fileConfig(logging_config_file_path)
LOGGER = logging.getLogger()
# TODO: AWS SQS url, region and credentials need to change
AWS_SQS_REGION = os.getenv('AWS_SQS_REGION', 'ap-northeast-2')
AWS_SQS_URL = os.getenv('AWS_SQS_URL', 'https://sqs.ap-northeast-2.amazonaws.com')
AWS_SQS_ACCESS_KEY_ID = os.getenv('AWS_SQS_ACCESS_KEY_ID', '')
AWS_SQS_SECRET_ACCESS_KEY = os.getenv('AWS_SQS_SECRET_ACCESS_KEY', '')
# TODO: connection string needs to change
# Source DB configuration
DATABASE_HOST = os.getenv('DATABASE_HOST', 'localhost')
DATABASE_PORT = os.getenv('DATABASE_PORT', '5432')
DATABASE_USER = os.getenv('DATABASE_USER', 'psql')
DATABASE_PASSWORD = os.getenv('DATABASE_PASSWORD', '<PASSWORD>')
DATABASE_DB_NAME = os.getenv('DATABASE_DB_NAME', 'postgres')
DATABASE_SCHEMA = os.getenv('DATABASE_SCHEMA', 'public')
POSTGRES_CONN_STRING = \
f'postgresql://{DATABASE_USER}:{DATABASE_PASSWORD}@{DATABASE_HOST}:{DATABASE_PORT}/{DATABASE_DB_NAME}'
def run_mysql_job() -> DefaultJob:
where_clause_suffix = textwrap.dedent(f"""
where c.table_schema = '{DATABASE_SCHEMA}'
""")
tmp_folder = '/var/tmp/amundsen/table_metadata'
node_files_folder = '{tmp_folder}/nodes/'.format(tmp_folder=tmp_folder)
relationship_files_folder = '{tmp_folder}/relationships/'.format(tmp_folder=tmp_folder)
job_config = ConfigFactory.from_dict({
'extractor.postgres_metadata.{}'.format(PostgresMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY):
where_clause_suffix,
'extractor.postgres_metadata.{}'.format(PostgresMetadataExtractor.USE_CATALOG_AS_CLUSTER_NAME):
True,
'extractor.postgres_metadata.extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
POSTGRES_CONN_STRING,
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.NODE_DIR_PATH):
node_files_folder,
'loader.filesystem_csv_neo4j.{}'.format(FsNeo4jCSVLoader.RELATION_DIR_PATH):
relationship_files_folder,
'publisher.awssqs.{}'.format(aws_sqs_csv_puiblisher.NODE_FILES_DIR):
node_files_folder,
'publisher.awssqs.{}'.format(aws_sqs_csv_puiblisher.RELATION_FILES_DIR):
relationship_files_folder,
'publisher.awssqs.{}'.format(aws_sqs_csv_puiblisher.AWS_SQS_REGION):
AWS_SQS_REGION,
'publisher.awssqs.{}'.format(aws_sqs_csv_puiblisher.AWS_SQS_URL):
AWS_SQS_URL,
'publisher.awssqs.{}'.format(aws_sqs_csv_puiblisher.AWS_SQS_ACCESS_KEY_ID):
AWS_SQS_ACCESS_KEY_ID,
'publisher.awssqs.{}'.format(aws_sqs_csv_puiblisher.AWS_SQS_SECRET_ACCESS_KEY):
AWS_SQS_SECRET_ACCESS_KEY,
'publisher.awssqs.{}'.format(aws_sqs_csv_puiblisher.JOB_PUBLISH_TAG):
'unique_tag', # should use unique tag here like {ds}
})
job = DefaultJob(conf=job_config,
task=DefaultTask(extractor=PostgresMetadataExtractor(), loader=FsNeo4jCSVLoader()),
publisher=AWSSQSCsvPublisher())
return job
if __name__ == "__main__":
mysql_job = run_mysql_job()
mysql_job.launch()
| 1.742188 | 2 |
1 - EstruturaSequencial/ex_9.py | FelipeMontLima/Lista_de_exercicio_python_brasil | 0 | 12790412 | <filename>1 - EstruturaSequencial/ex_9.py<gh_stars>0
"""
9 -> Write a program that asks for the temperature in degrees Fahrenheit,
converts it and displays the temperature in degrees Celsius.
"""
while True:
    f = input('Enter the temperature in degrees Fahrenheit: ')
if f.isdigit():
f = int(f)
c = (f - 32) / 1.8
        print(f'The temperature {f}ºF converted to Celsius is {c:.2f}ºC.')
break
else:
        print('You must enter a value to convert.') | 3.5625 | 4 |
Exercise/_DCGAN_CIFAR10/DCGAN_CIFAR10.py | Ninei/GANs | 0 | 12790413 | from IPython import display
from torch.utils.data import DataLoader
from torchvision import transforms, datasets
import os
import tensorflow as tf
from tensorflow import nn, layers
from tensorflow.contrib import layers as clayers
import numpy as np
import errno
import torchvision.utils as vutils
from tensorboardX import SummaryWriter
from matplotlib import pyplot as plt
import torch
# Output Directory
OUTPUT_PATH = os.path.join(os.path.abspath(__file__+ "../../"), '.output/')
DATASET_PATH = os.path.join(os.path.abspath(__file__+ "../../"), '.dataset/CIFAR/')
if not os.path.exists(OUTPUT_PATH): os.makedirs(OUTPUT_PATH)
if not os.path.exists(DATASET_PATH): os.makedirs(DATASET_PATH)
def cifar_data():
compose = transforms.Compose([transforms.Resize(64),transforms.ToTensor(),transforms.Normalize((.5, .5, .5), (.5, .5, .5)),])
return datasets.CIFAR10(root=DATASET_PATH, train=True, download=True, transform=compose)
dataset = cifar_data()
batch_size = 100
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
num_batches = len(dataloader)
IMAGES_SHAPE = (64, 64, 3)
NOISE_SIZE = 100
def default_conv2d(inputs, filters):
return layers.conv2d(
inputs,
filters=filters,
kernel_size=4,
strides=(2, 2),
padding='same',
data_format='channels_last',
use_bias=False,
)
def default_conv2d_transpose(inputs, filters):
return layers.conv2d_transpose(
inputs,
filters=filters,
kernel_size=4,
strides=(2, 2),
padding='same',
data_format='channels_last',
use_bias=False,
)
def noise(n_rows, n_cols):
return np.random.normal(size=(n_rows, n_cols))
def discriminator(x):
with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
with tf.variable_scope("conv1"):
conv1 = default_conv2d(x, 128)
conv1 = nn.leaky_relu(conv1,alpha=0.2)
with tf.variable_scope("conv2"):
conv2 = default_conv2d(conv1, 256)
conv2 = layers.batch_normalization(conv2)
conv2 = nn.leaky_relu(conv2,alpha=0.2)
with tf.variable_scope("conv3"):
conv3 = default_conv2d(conv2, 512)
conv3 = layers.batch_normalization(conv3)
conv3 = nn.leaky_relu(conv3,alpha=0.2)
with tf.variable_scope("conv4"):
conv4 = default_conv2d(conv3, 1024)
            conv4 = layers.batch_normalization(conv4)
            conv4 = nn.leaky_relu(conv4, alpha=0.2)
with tf.variable_scope("linear"):
linear = clayers.flatten(conv4)
linear = clayers.fully_connected(linear, 1)
with tf.variable_scope("out"):
out = nn.sigmoid(linear)
return out
def generator(z):
with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):
with tf.variable_scope("linear"):
linear = clayers.fully_connected(z, 1024 * 4 * 4)
with tf.variable_scope("conv1_transp"):
# Reshape as 4x4 images
conv1 = tf.reshape(linear, (-1, 4, 4, 1024))
conv1 = default_conv2d_transpose(conv1, 512)
conv1 = layers.batch_normalization(conv1)
conv1 = nn.relu(conv1)
with tf.variable_scope("conv2_transp"):
conv2 = default_conv2d_transpose(conv1, 256)
conv2 = layers.batch_normalization(conv2)
conv2 = nn.relu(conv2)
with tf.variable_scope("conv3_transp"):
conv3 = default_conv2d_transpose(conv2, 128)
conv3 = layers.batch_normalization(conv3)
conv3 = nn.relu(conv3)
with tf.variable_scope("conv4_transp"):
conv4 = default_conv2d_transpose(conv3, 3)
with tf.variable_scope("out"):
out = tf.tanh(conv4)
return out
## Real Input
real_sample = tf.placeholder(tf.float32, shape=(None, )+IMAGES_SHAPE)
## Latent Variables / Noise
noise_sample = tf.placeholder(tf.float32, shape=(None, NOISE_SIZE))
# Generator
G_sample = generator(noise_sample)
# Discriminator
D_real = discriminator(real_sample)
D_fake = discriminator(G_sample)
# Generator
G_loss = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_fake, labels=tf.ones_like(D_fake)
)
)
# Discriminator
D_loss_real = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_real, labels=tf.ones_like(D_real)
)
)
D_loss_fake = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
logits=D_fake, labels=tf.zeros_like(D_fake)
)
)
D_loss = D_loss_real + D_loss_fake
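# The losses above use sigmoid cross-entropy: the discriminator is pushed towards
# 1 on real batches and 0 on generated batches, while the generator is trained with
# the non-saturating objective (labels of 1 on D_fake).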
# Obtain trainable variables for both networks
train_vars = tf.trainable_variables()
G_vars = [var for var in train_vars if 'generator' in var.name]
D_vars = [var for var in train_vars if 'discriminator' in var.name]
num_epochs = 200
G_opt = tf.train.AdamOptimizer(2e-4).minimize(G_loss, var_list=G_vars,)
D_opt = tf.train.AdamOptimizer(2e-4).minimize(D_loss, var_list=D_vars,)
num_test_samples = 16
test_noise = noise(num_test_samples, NOISE_SIZE)
BATCH_SIZE = 100
NUM_EPOCHS = 200
# session = tf.InteractiveSession()
# tf.global_variables_initializer().run(session=session)
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
logger = Logger(model_name='DCGAN1', data_name='CIFAR10', root_path=OUTPUT_PATH)
# Iterate through epochs
for epoch in range(NUM_EPOCHS):
for n_batch, (batch,_) in enumerate(dataloader):
# 1. Train Discriminator
X_batch = batch.permute(0, 2, 3, 1).numpy()
feed_dict = {real_sample: X_batch, noise_sample: noise(BATCH_SIZE, NOISE_SIZE)}
_, d_error, d_pred_real, d_pred_fake = sess.run([D_opt, D_loss, D_real, D_fake], feed_dict=feed_dict)
# 2. Train Generator
feed_dict = {noise_sample: noise(BATCH_SIZE, NOISE_SIZE)}
_, g_error = sess.run([G_opt, G_loss], feed_dict=feed_dict)
# if n_batch % 10 == 0:
logger.display_status(epoch, num_epochs, n_batch, num_batches,d_error, g_error, d_pred_real, d_pred_fake)
if n_batch % 100 == 0:
display.clear_output(True)
# Generate images from test noise
test_images = sess.run(G_sample, feed_dict={noise_sample: test_noise})
# Log Images
logger.log_images(test_images, num_test_samples, epoch, n_batch, num_batches, format='NHWC');
# Log Status
logger.display_status(epoch, num_epochs, n_batch, num_batches,d_error, g_error, d_pred_real, d_pred_fake)
class Logger:
def __init__(self, model_name, data_name, root_path):
self.model_name = model_name
self.data_name = data_name
self.comment = '{}_{}'.format(model_name, data_name)
self.data_subdir = '{}/{}'.format(model_name, data_name)
# TensorBoard
self.writer = SummaryWriter(comment=self.comment)
self.rootPath = root_path
def log(self, d_error, g_error, epoch, n_batch, num_batches):
# var_class = torch.autograd.variable.Variable
if isinstance(d_error, torch.autograd.Variable):
d_error = d_error.data.cpu().numpy()
if isinstance(g_error, torch.autograd.Variable):
g_error = g_error.data.cpu().numpy()
step = Logger._step(epoch, n_batch, num_batches)
self.writer.add_scalar(
'{}/D_error'.format(self.comment), d_error, step)
self.writer.add_scalar(
'{}/G_error'.format(self.comment), g_error, step)
def log_images(self, images, num_images, epoch, n_batch, num_batches, format='NCHW', normalize=True):
'''
input images are expected in format (NCHW)
'''
if type(images) == np.ndarray:
images = torch.from_numpy(images)
if format=='NHWC':
images = images.transpose(1,3)
step = Logger._step(epoch, n_batch, num_batches)
img_name = '{}/images{}'.format(self.comment, '')
# Make horizontal grid from image tensor
horizontal_grid = vutils.make_grid(images, normalize=normalize, scale_each=True)
# Make vertical grid from image tensor
nrows = int(np.sqrt(num_images))
grid = vutils.make_grid(images, nrow=nrows, normalize=True, scale_each=True)
# Add horizontal images to tensorboard
self.writer.add_image(img_name, horizontal_grid, step)
# Save plots
self.save_torch_images(horizontal_grid, grid, epoch, n_batch)
print("Save Log Image")
def save_torch_images(self, horizontal_grid, grid, epoch, n_batch, plot_horizontal=True):
out_dir = (self.rootPath+'/images/{}').format(self.data_subdir)
Logger._make_dir(out_dir)
# Plot and save horizontal
fig = plt.figure(figsize=(16, 16))
plt.imshow(np.moveaxis(horizontal_grid.numpy(), 0, -1))
plt.axis('off')
if plot_horizontal:
display.display(plt.gcf())
self._save_images(fig, epoch, n_batch, 'hori')
plt.close()
# Save squared
fig = plt.figure()
plt.imshow(np.moveaxis(grid.numpy(), 0, -1))
plt.axis('off')
self._save_images(fig, epoch, n_batch)
plt.close()
def _save_images(self, fig, epoch, n_batch, comment=''):
out_dir = (self.rootPath+'/images/{}').format(self.data_subdir)
Logger._make_dir(out_dir)
fig.savefig('{}/{}_epoch_{}_batch_{}.png'.format(out_dir,comment, epoch, n_batch))
def display_status(self, epoch, num_epochs, n_batch, num_batches, d_error, g_error, d_pred_real, d_pred_fake):
# var_class = torch.autograd.variable.Variable
if isinstance(d_error, torch.autograd.Variable):
d_error = d_error.data.cpu().numpy()
if isinstance(g_error, torch.autograd.Variable):
g_error = g_error.data.cpu().numpy()
if isinstance(d_pred_real, torch.autograd.Variable):
d_pred_real = d_pred_real.data
if isinstance(d_pred_fake, torch.autograd.Variable):
d_pred_fake = d_pred_fake.data
print('Epoch: [{}/{}], Batch Num: [{}/{}]'.format(
epoch,num_epochs, n_batch, num_batches)
)
print('Discriminator Loss: {:.4f}, Generator Loss: {:.4f}'.format(d_error, g_error))
print('D(x): {:.4f}, D(G(z)): {:.4f}'.format(d_pred_real.mean(), d_pred_fake.mean()))
def save_models(self, generator, discriminator, epoch):
out_dir = (self.rootPath+'/models/{}').format(self.data_subdir)
Logger._make_dir(out_dir)
torch.save(generator.state_dict(),
'{}/G_epoch_{}'.format(out_dir, epoch))
torch.save(discriminator.state_dict(),
'{}/D_epoch_{}'.format(out_dir, epoch))
def close(self):
self.writer.close()
# Private Functionality
@staticmethod
def _step(epoch, n_batch, num_batches):
return epoch * num_batches + n_batch
@staticmethod
def _make_dir(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| 2.40625 | 2 |
run_gitlab_ci.py | Billmvp73/attention-sampling | 0 | 12790414 | <filename>run_gitlab_ci.py<gh_stars>0
#!/usr/bin/env python
#
# Copyright (c) 2019 Idiap Research Institute, http://www.idiap.ch/
# Written by <NAME> <<EMAIL>>
#
import argparse
import os
from os import path
from subprocess import call
import tempfile
import yaml
SCRIPT_TPL = """#!/bin/bash
git clone . {dir}/project
{commands}
"""
RESERVED_NAMES = [
"image",
"services",
"stages",
"types",
"before_script",
"after_script",
"variables",
"cache"
]
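# For reference, a minimal .gitlab-ci.yml that this script can approximate
# locally might look like (sketch):
#
#   stages:
#     - test
#   unit_tests:
#     stage: test
#     script:
#       - pip install -r requirements.txt
#       - pytest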
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Approximate running the scripts from the .gitlab-ci.yml"
)
parser.add_argument(
"--build_dir",
help="Set a build directory"
)
args = parser.parse_args()
# Make the temporary build directory
build_dir = tempfile.mkdtemp(dir=args.build_dir)
print("Building in", build_dir)
# Collect the commands from the yaml file
commands = []
    pipeline = yaml.safe_load(open(".gitlab-ci.yml"))
for stage in pipeline["stages"]:
for k in pipeline:
if k in RESERVED_NAMES:
continue
job = pipeline[k]
if not job.get("stage", "test") == stage:
continue
commands.append("cd {}/project".format(build_dir))
commands.extend(job["script"])
# Build the script
script = SCRIPT_TPL.format(
dir=build_dir,
commands="\n".join(commands)
)
handle, script_file = tempfile.mkstemp(dir=build_dir)
os.close(handle)
with open(script_file, "w") as f:
f.write(script)
# Execute it
call(["/bin/bash", script_file])
| 2.59375 | 3 |
_includes/code/construct-binary-tree-from-preorder-and-inorder-traversal/solution.py | rajat19/interview-questions | 0 | 12790415 | <reponame>rajat19/interview-questions
from typing import List, Optional
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> Optional[TreeNode]:
return self.helper(preorder, inorder, 0, 0, len(inorder) - 1)
def helper(self, preorder, inorder, prestart, instart, inend):
        if prestart >= len(preorder) or instart > inend:
return None
root = TreeNode(preorder[prestart])
index = instart
while index <= inend:
if inorder[index] == preorder[prestart]:
break
index += 1
root.left = self.helper(preorder, inorder, prestart+1, instart, index-1);
root.right = self.helper(preorder, inorder, prestart+index-instart+1, index+1, inend);
return root | 3.84375 | 4 |
lino_xl/lib/vat/__init__.py | khchine5/xl | 1 | 12790416 | # -*- coding: UTF-8 -*-
# Copyright 2013-2017 <NAME>
# License: BSD (see file COPYING for details)
"""See :doc:`/specs/vat`.
.. autosummary::
:toctree:
utils
.. fixtures.novat fixtures.euvatrates
"""
from django.utils.translation import ugettext_lazy as _
from lino.api import ad
import six
class Plugin(ad.Plugin):
"""The :class:`Plugin <lino.core.plugin.Plugin>` object for this
plugin.
"""
verbose_name = _("VAT")
needs_plugins = ['lino_xl.lib.countries', 'lino_xl.lib.ledger']
default_vat_regime = 'normal'
"""The default VAT regime. If this is specified as a string, Lino will
resolve it at startup into an item of :class:`VatRegimes
<lino_xl.lib.vat.VatRegimes>`.
"""
default_vat_class = 'normal'
"""The default VAT class. If this is specified as a string, Lino will
resolve it at startup into an item of :class:`VatClasses
<lino_xl.lib.vat.VatClasses>`.
"""
def get_vat_class(self, tt, item):
"""Return the VAT class to be used for given trade type and given
invoice item. Return value must be an item of
:class:`lino_xl.lib.vat.VatClasses`.
"""
return self.default_vat_class
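    # Override sketch (hypothetical subclass; the names used are illustrative only):
    #   def get_vat_class(self, tt, item):
    #       if getattr(item, 'is_food', False):
    #           return VatClasses.get_by_name('reduced')
    #       return self.default_vat_class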
def on_site_startup(self, site):
vat = site.modules.vat
if isinstance(self.default_vat_regime, six.string_types):
self.default_vat_regime = vat.VatRegimes.get_by_name(
self.default_vat_regime)
if isinstance(self.default_vat_class, six.string_types):
self.default_vat_class = vat.VatClasses.get_by_name(
self.default_vat_class)
def setup_reports_menu(self, site, user_type, m):
mg = site.plugins.accounts
# mg = site.plugins.vat
# mg = self
m = m.add_menu(mg.app_label, mg.verbose_name)
m.add_action('vat.PrintableInvoicesByJournal')
m.add_action('vat.IntracomPurchases')
m.add_action('vat.IntracomSales')
def setup_explorer_menu(self, site, user_type, m):
m = m.add_menu(self.app_label, self.verbose_name)
m.add_action('vat.VatAreas')
m.add_action('vat.VatRegimes')
m.add_action('vat.VatClasses')
m.add_action('vat.VatColumns')
m.add_action('vat.Invoices')
m.add_action('vat.VatRules')
# m.add_action('vat.InvoiceItems')
| 2.234375 | 2 |
Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/relay/ir_pass.py | mengkai94/training_results_v0.6 | 64 | 12790417 | <reponame>mengkai94/training_results_v0.6<filename>Fujitsu/benchmarks/resnet/implementations/mxnet/3rdparty/tvm/python/tvm/relay/ir_pass.py
# pylint: disable=no-else-return,
# pylint: disable=unidiomatic-typecheck
"""The set of passes for Relay.
Exposes an interface for configuring the passes and scripting
them in Python.
"""
from . import _ir_pass
from . import _make
# pylint: disable=invalid-name
def infer_type(env, expr):
"""Infer the type of expr under the context of env.
Parameters
----------
env : relay.Environment
The global environment.
expr : relay.Expr
The input expression.
Returns
-------
checked_expr : relay.Expr
The checked expression.
"""
return _ir_pass.infer_type(env, expr)
well_formed = _ir_pass.well_formed
check_kind = _ir_pass.check_kind
free_vars = _ir_pass.free_vars
free_type_vars = _ir_pass.free_type_vars
def dead_code_elimination(e):
""" Remove expressions which does not effect the program result (dead code).
Parameters
----------
e: relay.Expr
The input Expression
Returns
-------
result: relay.Expr
An expression which is semantically equal to the input expression,
but with dead code removed.
"""
return _ir_pass.dead_code_elimination(e)
def alpha_equal(lhs, rhs):
"""Compare two Relay expr for structural equivalence (alpha equivalence).
Parameters
----------
lhs: relay.Expr
One of the input Expression.
rhs: relay.Expr
One of the input Expression.
Returns
-------
result: bool
True iff lhs is alpha equal to rhs.
"""
return bool(_make._alpha_equal(lhs, rhs))
| 2.171875 | 2 |
code/insert_sensordata_from_azure_queue.py | jurjanbrust/wsl2_mysql_grafana | 0 | 12790418 | <reponame>jurjanbrust/wsl2_mysql_grafana<gh_stars>0
from azure.storage.queue import (
QueueClient,
TextBase64EncodePolicy,
TextBase64DecodePolicy
)
import os, uuid, time, json
import mysql.connector
from datetime import datetime
connect_str = "DefaultEndpointsProtocol=https;AccountName=replace;AccountKey=replacewithyours;EndpointSuffix=core.windows.net"
queue_name = "name of queue"
mySql_dbName = "sensordata"
mySql_tableName = "temperature"
queue_client = QueueClient.from_connection_string(conn_str=connect_str, queue_name=queue_name, message_decode_policy=TextBase64DecodePolicy())
messages = queue_client.receive_messages(messages_per_page=5)
db = mysql.connector.connect(
host="db",
user="root",
passwd="<PASSWORD>",
database=mySql_dbName
)
cursor = db.cursor()
def processMessage():
message_json = json.loads(message.content)
payload_raw = message_json["payload_raw"]
payload_bytes = bytes(payload_raw, 'ascii')
sensor_counter = payload_bytes[0] + 256 * payload_bytes[1]
sensor_temperature = payload_bytes[2] + (payload_bytes[3] / 100)
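    # Worked example of the two lines above: if the first four payload bytes are
    # 1, 0, 23 and 50, then counter = 1 + 256 * 0 = 1 and temperature = 23 + 50 / 100 = 23.5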
sensor_time = message_json["metadata"]["time"][0: 19]
sensor_latitude = message_json["metadata"]["latitude"]
sensor_longitude = message_json["metadata"]["longitude"]
sensor_rssi = message_json["metadata"]["gateways"][0]["rssi"]
sensor_dev_id = message_json["dev_id"]
sensor_app_id = message_json["app_id"]
sensor_hardware_serial = message_json["hardware_serial"]
print("counter: " + str(sensor_counter) + " temp: " + str(sensor_temperature))
sql = "INSERT INTO " + mySql_tableName + " (counter, temperature, time, latitude, longitude, rssi, dev_id, app_id, hardware_serial) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)"
val = (sensor_counter, sensor_temperature, sensor_time, sensor_latitude, sensor_longitude, sensor_rssi, sensor_dev_id, sensor_app_id, sensor_hardware_serial)
try:
cursor.execute(sql, val)
db.commit()
except Exception as ex:
print(ex)
for message in messages:
processMessage()
queue_client.delete_message(message.id, message.pop_receipt)
time.sleep(0.1)
print("All Done")
| 2.09375 | 2 |
app/admin/__init__.py | RandyDeng/InterviewScheduler | 0 | 12790419 | from flask import Blueprint
admin = Blueprint('admin', __name__, url_prefix='/admin',
template_folder='templates')
| 1.59375 | 2 |
alipay/aop/api/domain/AlipayUserApplepayProvisioningbundleCreateModel.py | antopen/alipay-sdk-python-all | 213 | 12790420 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayUserApplepayProvisioningbundleCreateModel(object):
def __init__(self):
self._alipay_user_identifier = None
@property
def alipay_user_identifier(self):
return self._alipay_user_identifier
@alipay_user_identifier.setter
def alipay_user_identifier(self, value):
self._alipay_user_identifier = value
def to_alipay_dict(self):
params = dict()
if self.alipay_user_identifier:
if hasattr(self.alipay_user_identifier, 'to_alipay_dict'):
params['alipay_user_identifier'] = self.alipay_user_identifier.to_alipay_dict()
else:
params['alipay_user_identifier'] = self.alipay_user_identifier
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayUserApplepayProvisioningbundleCreateModel()
if 'alipay_user_identifier' in d:
o.alipay_user_identifier = d['alipay_user_identifier']
return o
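# Round-trip sketch using only the helpers defined above (the identifier value
# is a made-up placeholder, not a real Alipay user id):
#   m = AlipayUserApplepayProvisioningbundleCreateModel()
#   m.alipay_user_identifier = "2088000000000000"
#   m2 = AlipayUserApplepayProvisioningbundleCreateModel.from_alipay_dict(m.to_alipay_dict())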
| 2.28125 | 2 |
e3nn/util/codegen/_mixin.py | simonbatzner/e3nn | 0 | 12790421 | from typing import Dict
from ._eval import eval_code
class CodeGenMixin:
"""Mixin for classes that dynamically generate some of their methods.
This class manages evaluating generated code for subclasses while remaining pickle/deepcopy compatible. If subclasses need to override ``__getstate__``/``__setstate__``, they should be sure to call CodeGenMixin's first and use its output.
"""
def _codegen_register(
self,
funcs: Dict[str, str],
compile: bool = True
) -> None:
"""Register dynamically generated methods.
Parameters
----------
funcs : Dict[str, str]
Dictionary mapping method names to their code.
"""
if not hasattr(self, "__codegen__"):
# func_name -> code
self.__codegen__ = {}
self.__codegen__.update(funcs)
if compile:
self._codegen_compile()
def _codegen_compile(self):
"""Compile and set all registered dynamically generated methods."""
if hasattr(self, "__codegen__"):
# Compile the generated or static code
for fname, code in self.__codegen__.items():
setattr(self, fname, eval_code(code).main)
# In order to support copy.deepcopy and pickling, we need to not save the compiled TorchScript functions:
# See pickle docs: https://docs.python.org/3/library/pickle.html#pickling-class-instances
    # torch.nn.Module does not currently implement __get/setstate__ but may in the future, which is why we have these hasattr checks for other superclasses.
def __getstate__(self):
# - Get a state to work with -
# We need to check if other parent classes of self define __getstate__
if hasattr(super(CodeGenMixin, self), "__getstate__"):
out = super(CodeGenMixin, self).__getstate__().copy()
else:
out = self.__dict__.copy()
# - Remove compiled methods -
if hasattr(self, "__codegen__"):
# We cant save compiled functions
for fname in self.__codegen__:
out.pop(fname, None)
return out
def __setstate__(self, d):
d = d.copy()
if "__codegen__" in d:
codegen_state = d.pop("__codegen__")
# Remove any compiled methods that somehow entered the state
for k in codegen_state:
d.pop(k, None)
self.__codegen__ = codegen_state
self._codegen_compile()
# We need to check if other parent classes of self define __getstate__
if hasattr(super(CodeGenMixin, self), "__setstate__"):
super(CodeGenMixin, self).__setstate__(d)
else:
self.__dict__.update(d)
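# Minimal usage sketch (hypothetical subclass; assumes, as ``_codegen_compile``
# does above, that each registered source string defines a ``main`` callable):
#
#   class AddOne(CodeGenMixin, torch.nn.Module):
#       def __init__(self):
#           super().__init__()
#           self._codegen_register({"forward": "def main(x):\n    return x + 1\n"})
#
# Instances then expose a compiled ``forward`` and survive pickling/deepcopy,
# because __getstate__/__setstate__ drop and rebuild the compiled methods.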
| 2.859375 | 3 |
mysite/calls/migrations/0001_initial.py | gurupratap-matharu/django-calls-registration-app | 0 | 12790422 | # Generated by Django 2.1.5 on 2019-01-29 23:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Call',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('call_time', models.IntegerField(verbose_name='Call duration in seconds')),
('call_type', models.CharField(choices=[('IN', 'International'), ('NL', 'National'), ('DO', 'Domestic')], default='DO', max_length=2)),
],
),
]
| 1.875 | 2 |
src/rgbw_colorspace_converter/randomcolor.py | fossabot/rgbw_colorspace_converter | 0 | 12790423 | <reponame>fossabot/rgbw_colorspace_converter<filename>src/rgbw_colorspace_converter/randomcolor.py<gh_stars>0
"""
randomColor.py
Python translation of randomColor.js
http://llllll.li/randomColor/
https://github.com/davidmerfield/randomColor
randomColor generates attractive colors by default. More specifically,
it produces bright colors with a reasonably high saturation.
c = random_color()
print(c.rgb)
You can also specify a hue or luminosity to constrain
the colors generated:
Hue can be: 'red', 'orange', 'yellow', 'green', 'blue', 'purple',
'pink' or 'monochrome'
c = random_color('red')
Luminosity can be: 'bright', 'light', 'dark', 'random'
c = random_color(luminosity='light')
"""
from collections import namedtuple
from numbers import Number
import itertools
import random
from color import HSV
__all__ = ["random_color"]
def random_within(_min, _max):
"Return a random number within two values, inclusive of those values"
return random.randrange(int(_min), int(_max) + 1)
ColorDef = namedtuple(
"ColorDef", ["hue_range", "lower_bounds", "saturation_range", "brightness_range"]
)
def define_color(hue_range, lower_bounds):
s_min = lower_bounds[0][0]
s_max = lower_bounds[-1][0]
b_min = lower_bounds[-1][1]
b_max = lower_bounds[0][1]
return ColorDef(
hue_range=hue_range,
lower_bounds=lower_bounds,
saturation_range=(s_min, s_max),
brightness_range=(b_min, b_max),
)
def make_color_bounds():
COLOR_BOUNDS = [
# name, hue_range, lower_bounds
("monochrome", None, [[0, 0], [100, 0]]),
(
"red",
[-26, 18],
[
[20, 100],
[30, 92],
[40, 89],
[50, 85],
[60, 78],
[70, 70],
[80, 60],
[90, 55],
[100, 50],
],
),
(
"orange",
[19, 46],
[[20, 100], [30, 93], [40, 88], [50, 86], [60, 85], [70, 70], [100, 70]],
),
(
"yellow",
[47, 62],
[
[25, 100],
[40, 94],
[50, 89],
[60, 86],
[70, 84],
[80, 82],
[90, 80],
[100, 75],
],
),
(
"green",
[63, 178],
[
[30, 100],
[40, 90],
[50, 85],
[60, 81],
[70, 74],
[80, 64],
[90, 50],
[100, 40],
],
),
(
"blue",
[179, 257],
[
[20, 100],
[30, 86],
[40, 80],
[50, 74],
[60, 60],
[70, 52],
[80, 44],
[90, 39],
[100, 35],
],
),
(
"purple",
[258, 282],
[
[20, 100],
[30, 87],
[40, 79],
[50, 70],
[60, 65],
[70, 59],
[80, 52],
[90, 45],
[100, 42],
],
),
(
"pink",
[283, 334],
[[20, 100], [30, 90], [40, 86], [60, 84], [80, 80], [90, 75], [100, 73]],
),
]
dat = {}
for (name, hue_range, lower_bounds) in COLOR_BOUNDS:
dat[name] = define_color(hue_range, lower_bounds)
return dat
COLOR_DICT = make_color_bounds()
def get_color_info(hue):
# XXX takes int 0-360
# hacky method of not having to store two ranges for red
if 334 <= hue <= 360:
hue -= 360
for (name, color) in list(COLOR_DICT.items()):
if color.hue_range and hue >= color.hue_range[0] and hue <= color.hue_range[1]:
return color
raise Exception("No color found for hue=%d" % hue)
def get_saturation_range(hue):
# takes a hue int[0-360]
# XXX what's the valid range for saturation values?
try:
return get_color_info(hue).saturation_range # XXX
except Exception as e:
del e
print("exception in get_saturation_range for hue=", hue)
return (0, 100)
def get_hue_range(cin):
# XXX what format is this?
# returns (hue_min, hue_max)
if isinstance(cin, Number):
i = int(cin)
        if 0 < i < 360:
return (i, i)
if isinstance(cin, str):
if cin in COLOR_DICT:
return COLOR_DICT[cin].hue_range
return (0, 360)
def pairwise(iterable): # from the itertools documentation
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
def get_minimum_brightness(h, s):
# h is int[0,360]
# s is ????
lower_bounds = get_color_info(h).lower_bounds
for (sv1, sv2) in pairwise(lower_bounds):
s1, v1 = sv1
s2, v2 = sv2
if s1 <= s <= s2:
m = (v2 - v1) / (s2 - s1)
b = v1 - (m * s1)
return m * s + b
return 0
def pick_brightness(h, s, luminosity=None):
b_min = get_minimum_brightness(h, s)
b_max = 100
if luminosity == "dark":
b_max = b_min + 20
elif luminosity == "light":
        b_min = (b_min + b_max) / 2
elif luminosity == "random":
b_min = 0
b_max = 100
# print "brightness range:", (b_min, b_max)
return random_within(b_min, b_max)
def pick_saturation(h, hue=None, luminosity=None):
if luminosity == "random":
return random_within(0, 100)
if hue == "monochrome":
return 0
(s_min, s_max) = get_saturation_range(h)
if luminosity == "bright":
s_min = 55
elif luminosity == "dark":
s_min = s_max - 10
elif luminosity == "light":
s_max = 55
return random_within(s_min, s_max)
def pick_hue(hue):
(hue_min, hue_max) = get_hue_range(hue)
h = random_within(hue_min, hue_max)
if h < 0:
h += 360
return h
def random_color(hue=None, luminosity=None):
"""
Return a random color, by default a bright and highly
saturated color.
'hue' can be: 'red', 'orange', 'yellow', 'green', 'blue', 'purple', 'pink', 'monochrome'
'luminosity' can be: 'bright', 'light', 'dark', 'random'
"""
h = pick_hue(hue)
s = pick_saturation(h, hue, luminosity)
v = pick_brightness(h, s, luminosity)
h = h / 360
s = s / 100
v = v / 100
return HSV(h, s, v)
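# Example usage -- an illustrative sketch, not part of the original module; it only
# exercises the helpers defined above and prints the HSV tuples they return.
if __name__ == "__main__":
    print(random_color())                                  # bright, saturated color
    print(random_color(hue="blue", luminosity="light"))    # light blue
    print(random_color(hue="red", luminosity="dark"))      # dark red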
| 3.671875 | 4 |
ros/src/tl_detector/light_classification/tl_classifier.py | ahtchow/CarND-Capstone | 0 | 12790424 | <reponame>ahtchow/CarND-Capstone
import os
import cv2
import numpy as np
import rospy
import tensorflow as tf
from cv_bridge import CvBridge
from styx_msgs.msg import TrafficLight
from sensor_msgs.msg import Image
THRESHOLD_SCORE = 0.6
NUM_CLASSES = 4
class TLClassifier(object):
def __init__(self, is_site):
        # Load classifier depending on location [site/simulator]
        if not is_site:
self.threshold_score = THRESHOLD_SCORE
rospy.logwarn("STATUS: SIMULATOR")
ssd_model = os.path.abspath(os.curdir)+"/light_classification/frozen_model/frozen_sim_inception/frozen_inference_graph.pb"
else:
self.threshold_score = THRESHOLD_SCORE
rospy.logwarn("STATUS: SITE")
ssd_model = os.path.abspath(os.curdir)+"/light_classification/frozen_model/frozen_real_c2_1/frozen_inference_graph.pb"
self.sess = None
self.bridge = CvBridge()
self.detection_graph = tf.Graph()
# Tensorflow
with self.detection_graph.as_default():
classifier_graph = tf.GraphDef()
with tf.gfile.GFile(ssd_model, 'rb') as fid:
serialized_graph = fid.read()
classifier_graph.ParseFromString(serialized_graph)
tf.import_graph_def(classifier_graph, name='')
self.sess = tf.Session(graph=self.detection_graph)
# Classifier Elements
self.image_tensor = self.detection_graph.get_tensor_by_name('image_tensor:0')
self.detection_scores = self.detection_graph.get_tensor_by_name('detection_scores:0')
self.detection_classes = self.detection_graph.get_tensor_by_name('detection_classes:0')
self.detection_boxes = self.detection_graph.get_tensor_by_name('detection_boxes:0')
self.num_detections = self.detection_graph.get_tensor_by_name('num_detections:0')
rospy.loginfo("Traffic Light Classifier is Loaded!")
def get_classification(self, image):
"""Determines the color of the traffic light in the image
Args:
image (cv::Mat): image containing the traffic light
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# Predict Color of Traffic Light
state = TrafficLight.UNKNOWN
# image = self.load_image_into_numpy_array(image)
image_np_expanded = np.expand_dims(image, axis=0)
#Predict
with self.detection_graph.as_default():
boxes, scores_array, classes_array, num_detections = self.sess.run(
[self.detection_boxes, self.detection_scores, self.detection_classes, self.num_detections],
feed_dict={self.image_tensor: image_np_expanded})
#Here is the pbtxt format:
'''
item {
id: 1
name: 'Green'
}
item {
id: 2
name: 'Red'
}
item {
id: 3
name: 'Yellow'
}
item {
id: 4
name: 'off'
}
'''
#Process Scores
scores = np.array([s for s in scores_array[0] if s > self.threshold_score])
if len(scores) >= 1:
classes = classes_array[0,0:len(scores)].astype('int32')
# If any RED, send RED:
if (classes==2).any():
state = TrafficLight.RED
else:
counts = np.bincount(classes)
most_class = np.argmax(counts)
if most_class == 1:
state = TrafficLight.GREEN
elif most_class == 3:
state = TrafficLight.YELLOW
return state | 2.328125 | 2 |
pipeline/ccf.py | rozmar/map-ephys | 0 | 12790425 |
import csv
import logging
import numpy as np
import datajoint as dj
import pathlib
import scipy.io as scio
from tifffile import imread
from . import InsertBuffer
from .reference import ccf_ontology
from . import get_schema_name
schema = dj.schema(get_schema_name('ccf'))
log = logging.getLogger(__name__)
@schema
class CCFLabel(dj.Lookup):
definition = """
# CCF Dataset Information
ccf_label_id: int # Local CCF ID
---
ccf_version: int # Allen CCF Version
ccf_resolution: int # Voxel Resolution (uM)
ccf_description: varchar(255) # CCFLabel Description
"""
CCF_R3_20UM_ID = 0
CCF_R3_20UM_DESC = 'Allen Institute Mouse CCF, Rev. 3, 20uM Resolution'
CCF_R3_20UM_TYPE = 'CCF_R3_20UM'
contents = [(CCF_R3_20UM_ID, 3, 20, CCF_R3_20UM_DESC)]
@schema
class CCF(dj.Lookup):
definition = """
# Common Coordinate Framework
-> CCFLabel
ccf_x : int # (um)
ccf_y : int # (um)
ccf_z : int # (um)
"""
@schema
class AnnotationType(dj.Lookup):
definition = """
annotation_type : varchar(16)
"""
contents = ((CCFLabel.CCF_R3_20UM_TYPE,),)
@schema
class CCFAnnotation(dj.Manual):
definition = """
-> CCF
-> AnnotationType
---
annotation : varchar(1024)
index (annotation)
"""
@classmethod
def get_ccf_r3_20um_ontology_regions(cls):
return [c for c in csv.reader(ccf_ontology.splitlines())
if len(c) == 2]
@classmethod
def load_ccf_r3_20um(cls):
"""
Load the CCF r3 20 uM Dataset.
Requires that dj.config['ccf.r3_20um_path'] be set to the location
of the CCF Annotation tif stack.
"""
# TODO: scaling
log.info('CCFAnnotation.load_ccf_r3_20um(): start')
self = cls() # Instantiate self,
stack_path = dj.config['custom']['ccf.r3_20um_path']
stack = imread(stack_path) # load reference stack,
log.info('.. loaded stack of shape {} from {}'
.format(stack.shape, stack_path))
# iterate over ccf ontology region id/name records,
regions = self.get_ccf_r3_20um_ontology_regions()
region, nregions = 0, len(regions)
chunksz, ib_args = 50000, {'skip_duplicates': True,
'allow_direct_insert': True}
for num, txt in regions:
region += 1
num = int(num)
log.info('.. loading region {} ({}/{}) ({})'
.format(num, region, nregions, txt))
# extracting filled volumes from stack in scaled [[x,y,z]] shape,
vol = np.array(np.where(stack == num)).T[:, [2, 1, 0]] * 20
if not vol.shape[0]:
log.info('.. region {} volume: shape {} - skipping'
.format(num, vol.shape))
continue
log.info('.. region {} volume: shape {}'.format(num, vol.shape))
with dj.conn().transaction:
with InsertBuffer(CCF, chunksz, **ib_args) as buf:
for vox in vol:
buf.insert1((CCFLabel.CCF_R3_20UM_ID, *vox))
buf.flush()
with InsertBuffer(cls, chunksz, **ib_args) as buf:
for vox in vol:
buf.insert1((CCFLabel.CCF_R3_20UM_ID, *vox,
CCFLabel.CCF_R3_20UM_TYPE, txt))
buf.flush()
log.info('.. done.')
@schema
class AnnotatedBrainSurface(dj.Manual):
definition = """ # iso-surface of annotated brain in CCF coordinate frame
annotated_brain_name: varchar(100) # e.g. Annotation_new_10_ds222_16bit
---
vertices: longblob # (px)
faces: longblob
"""
@classmethod
def load_matlab_mesh(self, mesh_fp):
mesh_fp = pathlib.Path(mesh_fp).resolve()
assert mesh_fp.exists()
mesh = scio.loadmat(mesh_fp, struct_as_record = False, squeeze_me = True)['mesh']
self.insert1(dict(annotated_brain_name=mesh_fp.stem,
vertices=mesh.vertices,
faces=mesh.faces - 1), # 0-base index
allow_direct_insert=True)
| 2.109375 | 2 |
bot.py | shenkw1/gamelettr | 1 | 12790426 | import os
import discord
import requests
import json
from dotenv import load_dotenv
from discord.ext import commands
from datetime import datetime
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
API_KEY = os.getenv('API_KEY')
HEADERS = {
"x-api-key" : API_KEY
}
bot = commands.Bot(command_prefix = "-")
ROOT_URL = "https://esports-api.lolesports.com/persisted/gw/"
# Getting leagues from API data
response = requests.get(ROOT_URL + "getLeagues?hl=en-US", headers=HEADERS)
response_info = response.json()
leagues = response_info["data"]["leagues"]
# Organizing data and adding it to region-league hashmap, and adding IDs to list
regions = {}
ids = []
imgs = []
for league in leagues:
region = league["region"]
league_name = league["name"]
league_id = league["id"]
league_image = league["image"]
if region not in regions:
regions[region] = []
regions[region].append(league_name)
ids.append(league_id)
imgs.append(league_image)
# Connection
@bot.event
async def on_ready():
print(f'{bot.user.name} has connected to Discord')
# Ping command
@bot.command(help='Returns connection time')
async def ping(ctx):
msg = await ctx.channel.send("Pong")
now = datetime.now().timestamp()
ping = round(bot.latency * 1000)
edit_to = f"Pong, {ping} ms"
await msg.edit(content=edit_to)
# List command
# Outputs all the supported leagues, organized by region in embed menu
@bot.command(help="Returns supported leagues")
async def list(ctx):
# Creating embed
embed = discord.Embed(title="Supported leagues", color=discord.Color.blurple())
embed.set_thumbnail(url="https://pbs.twimg.com/profile_images/1407732613171793925/pQZWynfn_400x400.jpg")
for region in regions:
formatted_str = ", ".join(regions[region])
if region == "COMMONWEALTH OF INDEPENDENT STATES":
embed.add_field(name="CONTINENTAL", value=formatted_str, inline=True)
else:
embed.add_field(name=region, value=formatted_str, inline=(region!="EUROPE"))
# Adding empty character to fix column alignment in embed
v = 3 - ((len(regions) - 1) % 3)
for _ in range(v):
        embed.add_field(name="\u200b", value="\u200b")
await ctx.channel.send(embed=embed)
bot.run(TOKEN) | 2.671875 | 3 |
common/appconf.py | JobtechSwe/sokannonser-api | 14 | 12790427 | import logging
from common import settings
from elasticapm.contrib.flask import ElasticAPM
log = logging.getLogger(__name__)
def configure_app(flask_app):
flask_app.config.SWAGGER_UI_DOC_EXPANSION = settings.RESTPLUS_SWAGGER_UI_DOC_EXPANSION
flask_app.config.RESTPLUS_VALIDATE = settings.RESTPLUS_VALIDATE
flask_app.config.RESTPLUS_MASK_SWAGGER = settings.RESTPLUS_MASK_SWAGGER
flask_app.config.ERROR_404_HELP = settings.RESTPLUS_ERROR_404_HELP
if settings.APM_SERVICE_NAME and settings.APM_SERVICE_URL and settings.APM_SECRET:
flask_app.config['ELASTIC_APM'] = {
'SERVICE_NAME': settings.APM_SERVICE_NAME,
'SERVER_URL': settings.APM_SERVICE_URL,
'SECRET_TOKEN': settings.APM_SECRET,
'COLLECT_LOCAL_VARIABLES': 'off',
# regex to ignore specific routes
'TRANSACTIONS_IGNORE_PATTERNS': ['^OPTIONS ', '^HEAD ', '^.*\/\s*$', '.*swagger'],
'CAPTURE_BODY': 'errors',
'CAPTURE_HEADERS': 'false'
}
apm = ElasticAPM(flask_app, logging=settings.APM_LOG_LEVEL)
apm.capture_message('hello, apm!')
log.info("ElasticAPM enabled")
log.debug(f"APM details. Name: {settings.APM_SERVICE_NAME}, log level: {settings.APM_LOG_LEVEL}")
else:
log.info("ElasticAPM is disabled")
def initialize_app(flask_app, api):
configure_app(flask_app)
api.init_app(flask_app)
| 2.125 | 2 |
examples/house-credit-default/get_input.py | wqruan/tf-encrypted | 825 | 12790428 | """CLI for data preparation and processing."""
import argparse
from utils import data_prep
from utils import read_one_row
from utils import save_input
parser = argparse.ArgumentParser()
parser.add_argument(
"--save_row",
type=int,
default="0",
help="Saves a single row to a file defaults to row 0",
)
parser.add_argument(
"--input_file",
type=str,
default="final_data_with_feature_engineered.csv",
help=(
"File to read the row from defaults to "
"final_data_with_feature_engineered.csv"
),
)
parser.add_argument(
"--output_file",
type=str,
default="input.npy",
help=("Output file with the input row defaults to " "input.npy"),
)
config = parser.parse_args()
input_file = config.input_file
output_file = config.output_file
save_row = config.save_row
train_x_df, _ = data_prep(input_file)
out = read_one_row(save_row, train_x_df)
save_input(output_file, out)
| 2.828125 | 3 |
Python-Classes/class9.py | ViFLara/Python-Classes | 0 | 12790429 | list = [1, 10]
file = open('test.txt', 'r')
try:
text = file.read()
division = 10 / 1
    number = numbers[1]
except ZeroDivisionError:
print('Unable to perform a division by zero')
except ArithmeticError:
print('There was an error performing an arithmetic operation.')
except IndexError:
print("Error accessing invalid list index")
except Exception as ex:
print(f'Unknown error. Error: {ex}')
else:
print('Run when no exception occurs')
finally:
print('Always run')
print('Closing file')
file.close()
| 3.671875 | 4 |
src/python/ch07/sec08.py | zhuyuanxiang/Dive-into-Deep-Learning | 0 | 12790430 | <filename>src/python/ch07/sec08.py
# -*- encoding: utf-8 -*-
"""
@Author : zYx.Tom
@Contact : <EMAIL>
@site : https://zhuyuanxiang.github.io
---------------------------
@Software : PyCharm
@Project : Dive-into-Deep-Learning
@File : sec08.py
@Version : v0.1
@Time : 2020-12-27 9:25
@License : (C)Copyright 2018-2020, zYx.Tom
@Reference : "Dive into Deep Learning" (《动手学深度学习》)
@Desc : Sec 7.8 The Adam algorithm
@Summary:
1. Adam builds on RMSProp by also applying an exponentially weighted moving average to the mini-batch stochastic gradient.
2. Adam uses bias correction.
"""
import d2lzh as d2l
import data
import mxnet as mx
import numpy as np
from mxnet import autograd, gluon, init, nd
from mxnet.gluon import data as gdata, loss as gloss, nn
from tools import beep_end, show_subtitle, show_title, show_figures
# ----------------------------------------------------------------------
def main():
    # Adam builds on RMSProp by also applying an exponentially weighted moving average (EWMA) to the mini-batch stochastic gradient.
    # Adam keeps a momentum variable $v_t$ and, as in RMSProp, the EWMA $s_t$ of the element-wise squared mini-batch gradient.
    # With hyperparameter $\beta_1=0.9$, the momentum variable $v_t$ at time step $t$ is the EWMA of the mini-batch gradient $g_t$:
    # $v_t\leftarrow\beta_1 v_{t-1}+(1-\beta_1)g_t$
    # As in RMSProp, with hyperparameter $\beta_2=0.999$, the element-wise squared gradient $g_t\odot g_t$ is averaged into $s_t$:
    # $s_t\leftarrow\beta_2 s_{t-1}+(1-\beta_2) g_t\odot g_t$
    # When $t$ is small, the sum of the EWMA weights over the past time steps is also small
    # (for example, with $\beta_1=0.9$ the weights sum to $1-0.9^1=0.1$ after the first step).
    # To remove this effect, for any time step $t$ we divide by $1-\beta_1^t$ (and $1-\beta_2^t$) so that the weights over the past time steps sum to 1; this is called bias correction:
    # $\hat{v}_t\leftarrow\frac{v_t}{1-\beta_1^t}$
    # $\hat{s}_t\leftarrow\frac{s_t}{1-\beta_2^t}$
    # The corrected variables are used to rescale the gradient, so every element of the objective function's variables gets its own learning rate:
    # $g_t'\leftarrow\frac{\eta\hat{v}_t}{\sqrt{\hat{s}_t}+\epsilon}$
    # Finally, $g_t'$ is used to update the variables:
    # $x_t\leftarrow x_{t-1}-g_t'$
features, labels = data.get_data_ch7()
def init_adam_states():
v_w, v_b = nd.zeros((features.shape[1], 1)), nd.zeros(1)
s_w, s_b = nd.zeros((features.shape[1], 1)), nd.zeros(1)
return (v_w, s_w), (v_b, s_b)
def adam(params, states, hyperparams):
beta1, beta2, eps = 0.9, 0.999, 1e-6
learning_rate = hyperparams['lr']
for p, (v, s) in zip(params, states):
v[:] = beta1 * v + (1 - beta1) * p.grad
s[:] = beta2 * s + (1 - beta2) * p.grad.square()
v_bias_corr = v / (1 - beta1 ** hyperparams['t'])
s_bias_corr = s / (1 - beta2 ** hyperparams['t'])
p[:] -= learning_rate * v_bias_corr / (s_bias_corr.sqrt() + eps)
pass
hyperparams['t'] += 1
pass
d2l.train_ch7(adam, init_adam_states(), {'lr': 0.01, 't': 1}, features, labels)
d2l.train_gluon_ch7('adam', {'learning_rate': 0.01}, features, labels)
pass
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
    # Notify that the run has finished
beep_end()
show_figures()
| 2.71875 | 3 |
misc/python/detective.py | saranshbht/codes-and-more-codes | 0 | 12790431 | <filename>misc/python/detective.py
n = int(input())
l = list(map(int, input().split()))
lst = []
for i in range(0, n + 1):
if i not in l:
lst.append(str(i))
print(" ".join(lst)) | 3.3125 | 3 |
tests/blendernc_nodetree_settings.test.py | StephanSiemen/blendernc | 39 | 12790432 | import os
import sys
import unittest
from io import StringIO
import bpy
import tests.test_utils as tutils
from blendernc.preferences import get_addon_preference
@tutils.refresh_state
def create_nodes(file, var):
node_groups = bpy.data.node_groups
if tutils.is_blendernc_in_nodetree(node_groups):
node_groups.remove(node_groups["BlenderNC"])
bpy.data.node_groups.new("BlenderNC", "BlenderNC")
# Create nodes
nodes = ["datacubePath", "datacubeNode", "datacubeResolution", "datacubeOutput"]
node_names = tutils.create_nodes(nodes)
node_tree = bpy.data.node_groups["BlenderNC"]
existing_nodes = [node_tree.nodes[node] for node in node_names]
# Now let's change properties.
props = tutils.build_dict_blendernc_prop(existing_nodes)
props["datacube Path"]["blendernc_file"] = file
props["datacube Input"]["blendernc_datacube_vars"] = var
props["Resolution"]["bendernc_resolution"] = 80
props["Output"]["update_on_frame_change"] = True
tutils.join_nodes(node_tree, existing_nodes, props)
# Create new image
bpy.ops.image.new(
name="BlenderNC_default",
width=1024,
height=1024,
color=(0.0, 0.0, 0.0, 1.0),
alpha=True,
generated_type="BLANK",
float=True,
)
# Assign new image to node
existing_nodes[-1].image = bpy.data.images.get("BlenderNC_default")
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
class Test_settings(unittest.TestCase):
def setUp(self) -> None:
file = os.path.abspath("./dataset/ssh_1995-01.nc")
var = "adt"
create_nodes(file, var)
return super().setUp()
def test_animation_setting_extend(self):
bpy.context.scene.blendernc_animation_type = "EXTEND"
frames = [3, 4, 5]
loaded_frames = []
for frame in frames:
with Capturing() as output:
bpy.context.scene.frame_set(frame)
if output:
loaded_frames.append(int(output[-3].split(" ")[-2]))
print("\n".join(output))
self.assertEqual(loaded_frames, [3, 4, 4])
def test_animation_setting_none(self):
loaded_frames = []
bpy.context.scene.blendernc_animation_type = "NONE"
frames = [3, 4, 5]
for frame in frames:
with Capturing() as output:
bpy.context.scene.frame_set(frame)
if output:
loaded_frames.append(int(output[-3].split(" ")[-2]))
print("\n".join(output))
print(loaded_frames)
# self.assertEqual(loaded_frames, [3,4])
def test_animation_setting_loop(self):
loaded_frames = []
bpy.context.scene.blendernc_animation_type = "LOOP"
frames = [3, 4, 5]
for frame in frames:
with Capturing() as output:
bpy.context.scene.frame_set(frame)
if output:
loaded_frames.append(int(output[-3].split(" ")[-2]))
print("\n".join(output))
self.assertEqual(loaded_frames, [3, 4, 0])
def test_memory_frames(self):
bpy.context.scene.blendernc_memory_handle = "FRAMES"
bpy.context.scene.blendernc_frames = 1
frames = [2, 3, 4]
removed_frames = []
for frame in frames:
with Capturing() as output:
bpy.context.scene.frame_set(frame)
if "Removed" in output[-1]:
removed_frames.append(int(output[-1].split(": ")[-1]))
print("\n".join(output))
self.assertEqual(removed_frames, [2, 3])
def test_memory_dynamic(self):
bpy.context.scene.blendernc_memory_handle = "DYNAMIC"
bpy.context.scene.blendernc_avail_mem_purge = 100
frames = [2, 3, 4]
removed_frames = []
for frame in frames:
with Capturing() as output:
bpy.context.scene.frame_set(frame)
if "Removed" in output[-1]:
removed_frames.append(int(output[-1].split(": ")[-1]))
print("\n".join(output))
self.assertEqual(removed_frames, [2, 3])
def test_dask(self):
pref = get_addon_preference()
pref.blendernc_use_dask = "True"
pref.blendernc_use_dask = "True"
pref.blendernc_use_dask = "False"
pref.blendernc_use_dask = "False"
pref.blendernc_use_dask = "True"
pref.blendernc_use_dask = "False"
suite = unittest.defaultTestLoader.loadTestsFromTestCase(Test_settings)
test = unittest.TextTestRunner().run(suite)
ret = not test.wasSuccessful()
sys.exit(ret)
| 2.109375 | 2 |
kaffepause/conftest.py | Eirsteir/kaffepause | 0 | 12790433 | from __future__ import print_function
import os
import warnings
import pytest
from graphene_django.utils.testing import graphql_query
from graphql_jwt.settings import jwt_settings
from graphql_jwt.shortcuts import get_token
from neo4j.exceptions import ClientError as CypherError
from neobolt.exceptions import ClientError
from neomodel import change_neo4j_password, clear_neo4j_database, config, db
from kaffepause.accounts.models import Account
from kaffepause.accounts.test.factories import AccountFactory
from kaffepause.users.models import User
from kaffepause.users.test.factories import UserFactory
@pytest.fixture(autouse=True)
def setup_and_teardown():
"""Fixture to clear database in between each test function."""
clear_neo4j_database(db)
yield
clear_neo4j_database(db)
def pytest_addoption(parser):
"""
Adds the command line option --resetdb.
:param parser: The parser object. Please see <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_addoption>`_
:type Parser object: For more information please see <https://docs.pytest.org/en/latest/reference.html#_pytest.config.Parser>`_
"""
parser.addoption(
"--resetdb",
action="store_true",
help="Ensures that the database is clear prior to running tests for neomodel",
default=False,
)
def pytest_sessionstart(session):
"""
Provides initial connection to the database and sets up the rest of the test suite
:param session: The session object. Please see <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_sessionstart>`_
:type Session object: For more information please see <https://docs.pytest.org/en/latest/reference.html#session>`_
"""
warnings.simplefilter("default")
config.DATABASE_URL = os.environ.get(
"NEO4J_BOLT_URL", "bolt://neo4j:foobar@localhost:7687"
)
config.AUTO_INSTALL_LABELS = True
try:
# Clear the database if required
database_is_populated, _ = db.cypher_query(
"MATCH (a) return count(a)>0 as database_is_populated"
)
if database_is_populated[0][0] and not session.config.getoption("resetdb"):
raise SystemError(
"Please note: The database seems to be populated.\n\tEither delete all nodes and edges manually, "
"or set the --resetdb parameter when calling pytest\n\n\tpytest --resetdb."
)
else:
clear_neo4j_database(db)
except (CypherError, ClientError) as ce:
# Handle instance without password being changed
if (
"The credentials you provided were valid, but must be changed before you can use this instance"
in str(ce)
):
warnings.warn(
"New database with no password set, setting password to '<PASSWORD>'"
)
try:
change_neo4j_password(db, "test")
# Ensures that multiprocessing tests can use the new password
config.DATABASE_URL = "bolt://neo4j:test@localhost:7687"
db.set_connection("bolt://neo4j:test@localhost:7687")
warnings.warn(
"Please 'export NEO4J_BOLT_URL=bolt://neo4j:test@localhost:7687' for subsequent test runs"
)
except (CypherError, ClientError) as e:
if (
"The credentials you provided were valid, but must be changed before you can use this instance"
in str(e)
):
warnings.warn(
"You appear to be running on version 4.0+ of Neo4j, without having changed the password."
"Please manually log in, change your password, then update the config.DATABASE_URL call at "
"line 32 in this file"
)
else:
raise e
else:
raise ce
@pytest.fixture(autouse=True)
def account() -> Account:
account = AccountFactory()
account.status.verified = True
account.status.save()
return account
@pytest.fixture(autouse=True)
def user(account) -> User:
return UserFactory(uuid=account.id)
@pytest.fixture
def friend(user) -> User:
friend = UserFactory()
user.add_friend(friend)
return friend
@pytest.fixture(autouse=True)
def token(account):
return f"{jwt_settings.JWT_AUTH_HEADER_PREFIX} {get_token(account)}"
@pytest.fixture(autouse=True)
def auth_headers(token):
return {jwt_settings.JWT_AUTH_HEADER_NAME: token}
@pytest.fixture
def client_query(client):
def func(*args, **kwargs):
return graphql_query(*args, **kwargs, client=client)
return func
| 1.851563 | 2 |
tests/test_json.py | bobhancock/proto-plus-python | 0 | 12790434 | # Copyright (C) 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import proto
from google.protobuf.json_format import MessageToJson, Parse
def test_message_to_json():
class Squid(proto.Message):
mass_kg = proto.Field(proto.INT32, number=1)
s = Squid(mass_kg=100)
json = Squid.to_json(s)
json = json.replace(" ", "").replace("\n", "")
assert json == '{"massKg":100}'
def test_message_from_json():
class Squid(proto.Message):
mass_kg = proto.Field(proto.INT32, number=1)
json = """{
"massKg": 100
}
"""
s = Squid.from_json(json)
assert s == Squid(mass_kg=100)
def test_message_json_round_trip():
class Squid(proto.Message):
mass_kg = proto.Field(proto.INT32, number=1)
s = Squid(mass_kg=100)
json = Squid.to_json(s)
s2 = Squid.from_json(json)
assert s == s2
| 2.296875 | 2 |
src/consolidate_washington_timber_reports_2003-2017.py | Ecotrust/embodied_carbon | 0 | 12790435 | <reponame>Ecotrust/embodied_carbon<gh_stars>0
import os
import re
import glob
import numpy as np
import pandas as pd
excel_files = glob.glob('../data/external/reports_2003-2017/*xl*')
fnames = [os.path.basename(f) for f in excel_files]
years = [int(re.findall('\d{4}', fname)[0]) for fname in fnames]
OWNERS = {'Private - Industrial': 'industry',
'Private - Large': 'large_private',
'Private - Small': 'small_private',
'Private - Unknown': 'unknown_private',
'State': 'state',
'Other Public': 'other_public',
'Federal': 'federal'}
# capitalization is not consistent in every spreadsheet, so we'll need
# to identify each sheet_name that matches any of the counties listed above
COUNTIES = ['ASOTIN', 'CHELAN', 'CLALLAM', 'CLARK', 'COLUMBIA', 'COWLITZ',
'FERRY', 'GARFIELD', 'GRAYS HARBOR', 'ISLAND', 'JEFFERSON', 'KING',
'KITSAP', 'KITTITAS', 'KLICKITAT', 'LEWIS', 'LINCOLN', 'MASON',
'OKANOGAN', 'PACIFIC', 'PEND OREILLE', 'PIERCE', 'SAN JUAN',
'SKAGIT', 'SKAMANIA', 'SNOHOMISH', 'SPOKANE', 'STEVENS',
'THURSTON', 'WAHKIAKUM', 'WHATCOM', 'YAKIMA']
COUNTIES2 = [x+'2' for x in COUNTIES]
COUNTIES_PLUS = [x+' COUNTY' for x in COUNTIES]
year_dfs = []
for idx, f in enumerate(excel_files):
sheets = pd.ExcelFile(f).sheet_names
good_sheets = [s for s in sheets if s.upper() in COUNTIES]
if len(good_sheets) == 0:
good_sheets = [s for s in sheets if s.upper() in COUNTIES2]
if len(good_sheets) == 0:
good_sheets = [s for s in sheets if s.upper() in COUNTIES_PLUS]
county_dfs = []
for sheet_name in good_sheets:
df = pd.read_excel(f, index_col=0, sheet_name=sheet_name,
header=None, skiprows=range(0,10))
# the rows with individual owner types are preceded by two spaces
use = df.index.fillna('').str.lstrip().str.rstrip().isin(OWNERS.keys())
data = df.loc[use].T.iloc[-1]
# strip whitespace from the names
data.index = data.index.str.strip()
data['year'] = years[idx]
if sheet_name.upper() in COUNTIES2:
county_name = sheet_name[:-1]
elif sheet_name.upper() in COUNTIES_PLUS:
county_name = sheet_name.split(' County')[0]
else:
county_name = sheet_name
data['county'] = county_name.upper()
county_dfs.append(data)
try:
year_dfs.append(pd.concat(county_dfs, axis=1,
ignore_index=True, sort=True).T)
except:
print('Failed on', f)
pass
consolidated = pd.concat(year_dfs, axis=0, ignore_index=True, sort=True)
consolidated = consolidated.sort_values(by=['year', 'county'])
COL_ORDER = ['year', 'county'] + list(OWNERS.keys())
consolidated = consolidated[COL_ORDER].rename(OWNERS, axis=1)
consolidated.to_csv('../data/raw/washington_timber_harvest_2003-2017.csv',
index=False)
| 2.578125 | 3 |
04_Inspecao/4.1_Graficos_de_Dependencia_Parcial_e_Expectativa_Condicional_Individual/4.1.3_Definicao_Matematica.py | BrunoBertti/Scikit_Learning | 0 | 12790436 | <gh_stars>0
########## 4.1.3. Mathematical definition ##########
# Let X_S be the set of input features of interest (i.e., the features parameter) and let X_C be its complement.
# The partial dependence of the response f at a point x_S is defined as:
# \begin{split}pd_{X_S}(x_S) &\overset{def}{=} \mathbb{E}_{X_C}\left[ f(x_S, X_C) \right]\\
# &= \int f(x_S, x_C) p(x_C) dx_C,\end{split}
# where f(x_S, x_C) is the response function (predict, predict_proba or decision_function) for a given sample whose values are defined by x_S for the features in X_S and by x_C for the features in X_C. Note that x_S and x_C may be tuples.
# Computing this integral for several values of x_S produces a PDP plot as above. An ICE line is defined as a single f(x_{S}, x_{C}^{(i)}) evaluated at x_{S}.
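# A minimal, illustrative sketch of estimating pd_{X_S}(x_S) with scikit-learn; the
# synthetic dataset, estimator and feature index are assumptions for the example, not
# taken from the text. partial_dependence approximates the integral above by averaging
# f(x_S, X_C) over the samples in X; kind="individual" would return the ICE lines instead.
from sklearn.datasets import make_friedman1
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.inspection import partial_dependence
X, y = make_friedman1(n_samples=500, random_state=0)
est = GradientBoostingRegressor(random_state=0).fit(X, y)
pdp = partial_dependence(est, X, features=[0], kind="average")
print(pdp["average"].shape)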
| 2.46875 | 2 |
src/dataset.py | prayashkrsaha/marketBias | 22 | 12790437 | import numpy as np
import pandas as pd
import sys
import os
from utils import DATA_DIR
class Dataset(object):
def __init__(self, DATA_NAME):
self.DATA_NAME = DATA_NAME
print("Initializing dataset:", DATA_NAME)
sys.stdout.flush()
data = pd.read_csv(os.path.join(DATA_DIR, "df_"+DATA_NAME+".csv"))
data['item_id'].loc[data['item_id'].isna()] = ''
data['user_id'].loc[data['user_id'].isna()] = ''
item_id_vals, item_ids = pd.factorize(data['item_id'].values)
user_id_vals, user_ids = pd.factorize(data['user_id'].values)
item_attr_vals, item_attr_ids = pd.factorize(data['model_attr'].values)
user_attr_vals, user_attr_ids = pd.factorize(data['user_attr'].values)
tmp = dict(zip(data['item_id'].values, item_attr_vals))
self.item_attr = np.array([tmp[_i] for _i in item_ids], dtype=int)
tmp = dict(zip(data['user_id'].values, user_attr_vals))
self.user_attr = np.array([tmp[_i] for _i in user_ids], dtype=int)
data['item_id'] = item_id_vals
data['user_id'] = user_id_vals
self.item_ids = item_ids
self.user_ids = user_ids
self.item_attr_ids = item_attr_ids
self.user_attr_ids = user_attr_ids
self.n_item = data['item_id'].max()+1
self.n_user = data['user_id'].max()+1
self.data = data[['user_id','item_id','rating','split','model_attr','user_attr']]
print("Successfully initialized!")
print(self.data.shape[0], "training records")
print("about", self.n_user, "users and", self.n_item, "items are loaded!")
sys.stdout.flush()
def get_user_item_train_map(self):
data = self.data
user_item_train_map = (self.data.loc[(self.data['rating']>=4) & (self.data['split'] == 0)]).groupby(
['user_id'])['item_id'].apply(list).to_dict()
return user_item_train_map
def get_neg_samples(self, N_NEG=10):
user_item_map = (self.data.loc[self.data['rating']>=4]).groupby(['user_id'])['item_id'].apply(list).to_dict()
print("Start sampling negative examples ...")
neg_samples = []
count = 0
print("current progress for", self.n_user, "users: ", end="")
sys.stdout.flush()
for u in range(self.n_user):
if count % 5000 == 0:
print(count, end=", ")
sys.stdout.flush()
count += 1
p = np.ones(self.n_item)
if u in user_item_map:
pos_items = np.array(user_item_map[u], dtype=int)
p[pos_items] = 0
p /= np.sum(p)
neg_items = np.random.choice(self.n_item, size=N_NEG, p=p)
neg_samples.append(neg_items)
print("done!")
sys.stdout.flush()
return np.array(neg_samples, dtype=int)
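# Illustrative usage -- a sketch; "electronics" is a placeholder DATA_NAME assumption
# and requires a df_electronics.csv file to exist under DATA_DIR.
if __name__ == "__main__":
    dataset = Dataset("electronics")
    print(dataset.data.head())
    print(dataset.get_neg_samples(N_NEG=5).shape)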
| 2.734375 | 3 |
pygramadan/xml_helpers.py | jimregan/pygramadan | 0 | 12790438 | <reponame>jimregan/pygramadan
from .attributes import Gender, Strength
from pygramadan.forms import Form, FormSg, FormPlGen
import xml.etree.ElementTree as ET
def write_sg(inlist, name, root):
for form in inlist:
seprops = {}
seprops['default'] = form.value
seprops['gender'] = 'fem' if form.gender == Gender.Fem else 'masc'
_ = ET.SubElement(root, name, seprops)
def write_pl(inlist, name, root):
for form in inlist:
seprops = {}
seprops['default'] = form.value
_ = ET.SubElement(root, name, seprops)
def write_pl_gen(inlist, name, root):
for form in inlist:
seprops = {}
seprops['default'] = form.value
seprops['strength'] = 'strong' if form.strength == Strength.Strong else 'weak'
_ = ET.SubElement(root, name, seprops)
def formsg_node(root, node, outlist):
for form in root.findall(node):
value = form.attrib.get('default')
gender = Gender.Fem if form.attrib.get('gender') == 'fem' else Gender.Masc
outlist.append(FormSg(value, gender))
def formpl_node(root, node, outlist):
for form in root.findall(node):
value = form.attrib.get('default')
outlist.append(Form(value))
def formplgen_node(root, node, outlist):
for form in root.findall(node):
value = form.attrib.get('default')
strength = Strength.Strong if form.attrib.get('strength') == 'strong' else Strength.Weak
outlist.append(FormPlGen(value, strength))
| 2.359375 | 2 |
setup.py | ElsevierSoftwareX/SOFTX_2019_323 | 0 | 12790439 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='HPexome',
version='1.2.1',
author="<NAME>",
author_email="<EMAIL>",
description="An automated tool for processing whole-exome sequencing data",
long_description=long_description,
long_description_content_type="text/markdown",
url="http://bcblab.org/hpexome",
packages=setuptools.find_packages(),
include_package_data=True,
classifiers=[
"Programming Language :: Python :: 3"
],
install_requires=[
'Click'
],
entry_points='''
[console_scripts]
hpexome=hpexome.hpexome:hpexome
''',
project_urls={
"Source Code": "https://github.com/labbcb/hpexome",
"Bug Tracker": "https://github.com/labbcb/hpexome/issues"
}
)
| 1.578125 | 2 |
torchlib/dataloader.py | gkaissis/4P | 7 | 12790440 | import os
import random
import syft as sy
import pandas as pd
import numpy as np
from PIL import Image
from tqdm import tqdm
from torch import ( # pylint:disable=no-name-in-module
manual_seed,
stack,
cat,
std_mean,
save,
is_tensor,
from_numpy,
randperm,
default_generator,
)
from torch._utils import _accumulate
import albumentations as a
from copy import deepcopy
from torch.utils import data as torchdata
from torchvision.datasets import MNIST
from torchvision import transforms
from torchvision.datasets.folder import default_loader
from os.path import splitext
from typing import Dict, Union, Set, Callable
from pathlib import Path
from .dicomtools import DicomLoader
class AlbumentationsTorchTransform:
def __init__(self, transform, **kwargs):
# print("init albu transform wrapper")
self.transform = transform
self.kwargs = kwargs
def __call__(self, img):
# print("call albu transform wrapper")
if Image.isImageType(img):
img = np.array(img)
elif is_tensor(img):
img = img.cpu().numpy()
img = self.transform(image=img, **self.kwargs)["image"]
# if img.max() > 1:
# img = a.augmentations.functional.to_float(img, max_value=255)
img = from_numpy(img)
if img.shape[-1] < img.shape[0]:
img = img.permute(2, 0, 1)
return img
class CombinedLoader:
"""Class that combines several data loaders and their extensions.
Args:
mapping (Dict): Dictionary that maps loader names to tuples
consisting of (corresponding extensions, loader method)
"""
def __init__(
self,
mapping: Dict[str, Dict[str, Union[Set[str], Callable]]] = {
"default": {
"extensions": {
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
},
"loader": default_loader,
},
"dicom": {"extensions": {".dcm", ".dicom"}, "loader": DicomLoader(3)},
},
):
self.extensions = set()
self.mapping = mapping
self.ext_to_loader_name = dict()
for loader_name, defining_dict in mapping.items():
self.extensions |= defining_dict["extensions"]
for ext in defining_dict["extensions"]:
if ext in self.ext_to_loader_name:
raise RuntimeError(
"Extension {:s} was passed for multiple loaders".format(ext)
)
self.ext_to_loader_name[ext] = loader_name
def __call__(self, path: Path, **kwargs):
"""Apply loader to path
Args:
path (Path): path to file.
kwargs: kwargs passed to load methods
Returns:
Image: a PIL image of the given path
Raises:
RuntimeError: If loader for path extension not specified.
"""
file_ending = splitext(path)[1].lower()
if file_ending in self.extensions:
return self.mapping[self.ext_to_loader_name[file_ending]]["loader"](
path, **kwargs
)
else:
raise RuntimeError(
"file extension does not match specified supported extensions. "
"Please provide the matching loader for the {:s} extension.".format(
file_ending
)
)
def change_channels(self, num_channels: int):
"""Change the number of channels that are loaded (Default: 3)
Args:
num_channels (int): Number of channels. Currently only 1 and 3 supported
Raises:
RuntimeError: if num_channels is not 1 or 3
"""
if num_channels not in [1, 3]:
raise RuntimeError("Only 1 or 3 channels supported yet.")
self.mapping["default"]["loader"] = (
single_channel_loader if num_channels == 1 else default_loader
)
self.mapping["dicom"]["loader"] = DicomLoader(num_channels)
def create_albu_transform(args, mean, std):
train_tf = transforms.RandomAffine(
degrees=args.rotation,
translate=(args.translate, args.translate),
scale=(1.0 - args.scale, 1.0 + args.scale),
shear=args.shear,
# fillcolor=0,
)
start_transformations = [
a.Resize(args.inference_resolution, args.inference_resolution),
a.RandomCrop(args.train_resolution, args.train_resolution),
]
if args.clahe:
start_transformations.extend(
[
a.FromFloat(dtype="uint8", max_value=1.0),
a.CLAHE(always_apply=True, clip_limit=(1, 1)),
]
)
train_tf_albu = [
a.VerticalFlip(p=args.individual_albu_probs),
]
if args.randomgamma:
train_tf_albu.append(a.RandomGamma(p=args.individual_albu_probs))
if args.randombrightness:
train_tf_albu.append(a.RandomBrightness(p=args.individual_albu_probs))
if args.blur:
train_tf_albu.append(a.Blur(p=args.individual_albu_probs))
if args.elastic:
train_tf_albu.append(a.ElasticTransform(p=args.individual_albu_probs))
if args.optical_distortion:
train_tf_albu.append(a.OpticalDistortion(p=args.individual_albu_probs))
if args.grid_distortion:
train_tf_albu.append(a.GridDistortion(p=args.individual_albu_probs))
if args.grid_shuffle:
train_tf_albu.append(a.RandomGridShuffle(p=args.individual_albu_probs))
if args.hsv:
train_tf_albu.append(a.HueSaturationValue(p=args.individual_albu_probs))
if args.invert:
train_tf_albu.append(a.InvertImg(p=args.individual_albu_probs))
if args.cutout:
train_tf_albu.append(
a.Cutout(
num_holes=5, max_h_size=80, max_w_size=80, p=args.individual_albu_probs
)
)
if args.shadow:
assert args.pretrained, "RandomShadows needs 3 channels"
train_tf_albu.append(a.RandomShadow(p=args.individual_albu_probs))
if args.fog:
assert args.pretrained, "RandomFog needs 3 channels"
train_tf_albu.append(a.RandomFog(p=args.individual_albu_probs))
if args.sun_flare:
assert args.pretrained, "RandomSunFlare needs 3 channels"
train_tf_albu.append(a.RandomSunFlare(p=args.individual_albu_probs))
if args.solarize:
train_tf_albu.append(a.Solarize(p=args.individual_albu_probs))
if args.equalize:
train_tf_albu.append(a.Equalize(p=args.individual_albu_probs))
if args.grid_dropout:
train_tf_albu.append(a.GridDropout(p=args.individual_albu_probs))
train_tf_albu.append(a.GaussNoise(var_limit=args.noise_std ** 2, p=args.noise_prob))
end_transformations = [
a.ToFloat(max_value=255.0),
a.Normalize(mean, std, max_pixel_value=1.0),
]
if not args.pretrained:
end_transformations.append(
a.Lambda(image=lambda x, **kwargs: x[:, :, np.newaxis])
)
train_tf_albu = AlbumentationsTorchTransform(
a.Compose(
[
a.Compose(start_transformations),
a.Compose(train_tf_albu, p=args.albu_prob),
a.Compose(end_transformations),
]
)
)
return transforms.Compose([train_tf, train_tf_albu,])
def calc_mean_std(dataset, save_folder=None):
"""
Calculates the mean and standard deviation of `dataset` and
saves them to `save_folder`.
Needs a dataset where all images have the same size
"""
accumulated_data = []
for d in tqdm(
dataset, total=len(dataset), leave=False, desc="accumulate data in dataset"
):
if type(d) is tuple or type(d) is list:
d = d[0]
accumulated_data.append(d)
if isinstance(dataset, torchdata.Dataset):
accumulated_data = stack(accumulated_data)
elif isinstance(dataset, torchdata.DataLoader):
accumulated_data = cat(accumulated_data)
else:
raise NotImplementedError("don't know how to process this data input class")
if accumulated_data.shape[1] in [1, 3]: # ugly hack
dims = (0, *range(2, len(accumulated_data.shape)))
else:
dims = (*range(len(accumulated_data.shape)),)
std, mean = std_mean(accumulated_data, dim=dims)
if save_folder:
save(stack([mean, std]), os.path.join(save_folder, "mean_std.pt"))
return mean, std
def single_channel_loader(filename):
"""Converts `filename` to a grayscale PIL Image
"""
with open(filename, "rb") as f:
img = Image.open(f).convert("L")
return img.copy()
class LabelMNIST(MNIST):
def __init__(self, labels, *args, **kwargs):
super().__init__(*args, **kwargs)
indices = np.isin(self.targets, labels).astype("bool")
self.data = self.data[indices]
self.targets = self.targets[indices]
class PathDataset(torchdata.Dataset):
def __init__(
self,
root,
transform=None,
loader=CombinedLoader(),
extensions=[
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
".dcm",
".dicom",
],
):
super(PathDataset, self).__init__()
self.root = root
self.transform = transform
self.loader = loader
self.imgs = [
f
for f in os.listdir(root)
if os.path.splitext(f)[1].lower() in extensions
and not os.path.split(f)[1].lower().startswith("._")
]
def __len__(self):
return len(self.imgs)
def __getitem__(self, idx):
img_path = self.imgs[idx]
img = self.loader(os.path.join(self.root, img_path))
if self.transform:
img = self.transform(img)
return img
class RemoteTensorDataset(torchdata.Dataset):
def __init__(self, tensor):
self.tensor = tensor
def __len__(self):
return self.tensor.shape[0]
def __getitem__(self, idx):
return self.tensor[idx].copy()
class ImageFolderFromCSV(torchdata.Dataset):
def __init__(
self, csv_path, img_folder_path, transform=None, target_transform=None
):
super().__init__()
self.transform = transform
self.target_transform = target_transform
self.img_folder_path = img_folder_path
self.img_files = [
i for i in os.listdir(img_folder_path) if not i.startswith(".")
]
metastats = pd.read_csv(csv_path)
metastats["class_label"] = metastats.apply(
ImageFolderFromCSV.__meta_to_class__, axis=1
)
self.categorize_dict = dict(
zip(metastats.X_ray_image_name, metastats.class_label)
)
        for img in self.img_files[:]:  # iterate over a copy so entries can be removed safely
assert (
img in self.categorize_dict.keys()
), "img label not known {:s}".format(str(img))
if self.categorize_dict[img] == -1:
self.img_files.remove(img)
print("Ignore image {:s} because category is certain".format(img))
@staticmethod
def __meta_to_class__(row):
if row["Label"] == "Normal":
return 0
if row["Label"] == "Pnemonia": # i know this is a typo but was in original csv
if row["Label_1_Virus_category"] == "bacteria":
return 1
if row["Label_1_Virus_category"] == "Virus":
return 2
return -1
def __getitem__(self, i):
img_path = self.img_files[i]
label = self.categorize_dict[img_path]
img = single_channel_loader(os.path.join(self.img_folder_path, img_path))
if self.transform:
img = self.transform(img)
if self.target_transform:
label = self.target_transform(label)
return img, label
def __len__(self):
return len(self.img_files)
class PPPP(torchdata.Dataset):
def __init__(
self, label_path="data/Labels.csv", train=False, transform=None, seed=1,
):
super().__init__()
random.seed(seed)
manual_seed(seed)
self.train = train
self.labels = pd.read_csv(label_path)
self.labels = self.labels[
self.labels["Dataset_type"] == ("TRAIN" if train else "TEST")
]
self.transform = transform
"""
Split into train and validation set
if self.train:
indices = [
i
for i in range(len(self.labels))
if ((i % self.val_split) != 0 and self.val)
or (not self.val and (i % self.val_split) == 0)
]
self.labels = self.labels.drop(index=indices)
"""
def __len__(self):
return len(self.labels)
def __getitem__(self, index):
row = self.labels.iloc[index]
label = row["Numeric_Label"]
path = "train" if self.train else "test"
path = os.path.join("data", path, row["X_ray_image_name"])
img = single_channel_loader(path)
if self.transform:
img = self.transform(img)
return img, label
# def get_class_name(self, numeric_label):
# return self.class_names[numeric_label]
"""
    Works only if torch.utils.data.random_split has not been applied
"""
def get_class_occurances(self):
return dict(self.labels["Numeric_Label"].value_counts())
def __compute_mean_std__(self):
calc_mean_std(
self, save_folder="data",
)
# This is from torch.utils.data and adapted for our purposes
class Subset(torchdata.Dataset):
def __init__(self, dataset, indices):
self.dataset = deepcopy(dataset)
self.indices = indices
def __getitem__(self, idx):
return self.dataset[self.indices[idx]]
def __len__(self):
return len(self.indices)
def random_split(dataset, lengths, generator=default_generator):
if sum(lengths) != len(dataset):
raise ValueError(
"Sum of input lengths does not equal the length of the input dataset!"
)
indices = randperm(sum(lengths), generator=generator).tolist()
return [
Subset(dataset, indices[offset - length : offset])
for offset, length in zip(_accumulate(lengths), lengths)
]
if __name__ == "__main__":
# import matplotlib.pyplot as plt
import sys
from tqdm import tqdm
import numpy as np
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
from torchlib.utils import AddGaussianNoise
ds = PPPP(train=True, transform=transforms.ToTensor())
print("Class distribution")
print(ds.get_class_occurances())
sizes = []
for data, _ in tqdm(ds, total=len(ds), leave=False):
sizes.append(data.size()[1:])
sizes = np.array(sizes)
print(
"data resolution stats: \n\tmin: {:s}\n\tmax: {:s}\n\tmean: {:s}\n\tmedian: {:s}".format(
str(np.min(sizes, axis=0)),
str(np.max(sizes, axis=0)),
str(np.mean(sizes, axis=0)),
str(np.median(sizes, axis=0)),
)
)
ds = PPPP(train=False)
L = len(ds)
print("length test set: {:d}".format(L))
img, label = ds[1]
img.show()
tf = transforms.Compose(
[transforms.Resize(224), transforms.CenterCrop(224), transforms.ToTensor(),]
)
ds = PPPP(train=True, transform=tf)
ds.__compute_mean_std__()
L = len(ds)
print("length train set: {:d}".format(L))
from matplotlib import pyplot as plt
ds = PPPP()
hist = ds.labels.hist(bins=3, column="Numeric_Label")
plt.show()
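    # Illustrative CombinedLoader usage -- a sketch; the sample path below is a
    # placeholder assumption and is only read if it actually exists on disk.
    combined_loader = CombinedLoader()
    combined_loader.change_channels(1)  # switch default/DICOM loading to single-channel
    sample_path = Path("data/test/example.jpg")
    if os.path.exists(sample_path):
        print("sample image size:", combined_loader(sample_path).size)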
| 2.1875 | 2 |
katas/kyu_7/formatting_decimal_places_1.py | the-zebulan/CodeWars | 40 | 12790441 | from math import trunc
def two_decimal_places(number):
factor = float(10 ** 2)
return trunc(number * factor) / factor
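# A few illustrative checks (not part of the original kata solution):
if __name__ == "__main__":
    assert two_decimal_places(3.14159) == 3.14
    assert two_decimal_places(-7.129) == -7.12  # truncates toward zero, no rounding
    assert two_decimal_places(10.0) == 10.0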
| 2.640625 | 3 |
conplyent/console.py | joshijayesh/conplyent | 0 | 12790442 | <gh_stars>0
'''
:File: console.py
:Author: <NAME>
:Email: <EMAIL>
'''
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from queue import Queue
from ._decorators import timeout
from .exceptions import ConsoleExecTimeout
class ConsoleExecutor():
'''
Simple wrapper around subprocess to provide a non-blocking read from
stdout and stderr. This executor will start the subprocess using
universal_newlines=True, shell=False, and start_new_session=True to provide
the most responsive and proper executor. If the user specifies any kwargs
that should be sent to Popen, then they can override these settings.
This subprocess will start as the class is initialized and the class will
start a background thread that continually reads the output from the
executor. This way, if the user wants to read the output, we can simply
check to see if the background thread has done anything by timing out this
main thread and allowing the background thread to do its job. This also
allows users to timeout any read requests if user just wants to check if
there are any output.
This class also provides other interactive means to communicate with the
subprocess such as sending input and terminating.
'''
def __init__(self, cmd, **kwargs):
self._cmd = cmd
if(kwargs):
self.__popen = Popen(cmd, stdout=PIPE, stderr=STDOUT, stdin=PIPE, **kwargs)
else:
self.__popen = Popen(cmd, stdout=PIPE, stderr=STDOUT, stdin=PIPE, universal_newlines=True, shell=False,
start_new_session=True)
self.__queue = Queue()
self.__bg_worker = Thread(target=ConsoleExecutor.__file_reader, args=(self.__queue, self.__popen.stdout),
daemon=True)
self.__bg_worker.start()
self._alive = True
@property
def cmd(self):
'''
Command linked to this executor.
        :getter: the command that is being executed.
'''
return self._cmd
@property
def returncode(self):
'''
Exit code from the subprocess. Only set once the subprocess has exited.
:getter: (int) any exit code from process
'''
return self.__popen.returncode
@property
def alive(self):
'''
Determines if the subprocess is still alive or not. The background
thread used to read in any output from the subprocess will have exited
once the subprocess has completed.
:getter: (bool) True if subprocess is still alive. False if completed or
exited otherways.
'''
return self.__bg_worker.is_alive()
@property
def empty(self):
'''
Determines if there is no more output left in the bg executor.
:getter: (bool) True if no more output. False otherwise
'''
return self.__queue.empty()
def read_output(self, timeout=None):
'''
        Reads the next output from the subprocess. Output must have been flushed
        to stdout or stderr by the subprocess.
By default, this method will poll forever until the subprocess has
passed any output. Users can define timeout to wait only a specific
amount of time for the next output.
:param timeout: Amount of time in seconds to wait for the next output.
:type timeout: int
:returns: Output read from the subprocess. This value will be None if
the subprocess has exited.
:raises ConsoleExecTimeout: If user specifies a non-None/non-Negative
timeout and subprocess has not responded in time.
'''
if(not(self.__queue.empty())):
return self.__queue.get()
while(True):
if(self.alive):
self.__poll_queue(timeout=timeout, exception=ConsoleExecTimeout)
if(self.__queue.empty()):
if(not(self.alive)):
self.__popen.wait()
if(not(self.__queue.empty())):
return self.__queue.get()
return None
else:
return self.__queue.get() # should never halt here...
else:
self.__popen.wait()
if(not(self.__queue.empty())):
return self.__queue.get()
return None
def send_input(self, value, new_line=True):
'''
Allows users to send input to the subprocess. This input will be flushed
into the subprocess to ensure that the input will be read. To acheive
this, this method will automatically add an extra new line if the user
hasn't specified a new line. This automatic behavior can be disabled by
optional user input new_line
:param value: Message to send to the subprocess.
:type value: str
:param new_line: True if method should add a new line if missing. False
to ignore this feature.
:type new_line: bool
'''
if(self.alive):
            self.__popen.stdin.write(value + "\n" if new_line and not value.endswith("\n") else value)
self.__popen.stdin.flush()
def kill(self):
'''
Terminates the subprocess and waits for it to exit gracefully. Currently
this will not stop any child processes spawned by our subprocess.
'''
self.__popen.terminate()
self.__popen.wait()
def close(self):
'''
Closes off any FDs open by this class to properly clear any memory used
by this subprocess. Terminates subprocess if alive.
'''
if(self.alive):
self.kill()
self.__bg_worker.join()
self.__popen.wait()
self.__popen.stdin.close()
self.__popen.stdout.close()
@timeout(name="Polling Subprocess")
def __poll_queue(self, **kwargs):
while(self.__queue.empty() and self.__bg_worker.is_alive()):
yield None
def __file_reader(queue, file):
for line in iter(file.readline, b'' or ''):
queue.put(line)
file.close()
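# Illustrative usage -- a sketch; the command and timeout below are arbitrary assumptions.
if __name__ == "__main__":
    executor = ConsoleExecutor(["python", "-c", "print('hello from the subprocess')"])
    try:
        line = executor.read_output(timeout=10)
        while line is not None:
            print(line, end="")
            line = executor.read_output(timeout=10)
    finally:
        executor.close()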
| 3.21875 | 3 |
{{cookiecutter.project_name}}/tests/data/data_fib.py | michael-c-hoffman/python-best-practices-cookiecutter | 0 | 12790443 | # pylint: disable-all
tests = [
(0, 0),
(1, 1),
(2, 1),
(3, 2),
(4, 3),
(6, 8),
(7, 13),
(8, 21),
(9, 34),
(10, 55),
(11, 89),
(12, 144),
(13, 233),
(14, 377),
(15, 610),
(17, 1597),
(18, 2584),
(19, 4181),
(20, 6765),
]
| 1.3125 | 1 |
predict_train_embeddings.py | simphide/Kaggle-2020-Alaska2 | 21 | 12790444 | import warnings
warnings.simplefilter("ignore", UserWarning)
warnings.simplefilter("ignore", FutureWarning)
import argparse
import os
import pandas as pd
import numpy as np
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from collections import defaultdict
from catalyst.utils import any2device
from pytorch_toolbelt.utils import to_numpy, fs
from pytorch_toolbelt.utils.catalyst import report_checkpoint
from alaska2 import *
from alaska2.dataset import get_train_except_holdout
@torch.no_grad()
def compute_trn_predictions(model, dataset, fp16=False, batch_size=1, workers=0) -> pd.DataFrame:
df = defaultdict(list)
for batch in tqdm(
DataLoader(
dataset, batch_size=batch_size, num_workers=workers, shuffle=False, drop_last=False, pin_memory=True
)
):
batch = any2device(batch, device="cuda")
if fp16 and INPUT_FEATURES_JPEG_FLOAT in batch:
batch[INPUT_FEATURES_JPEG_FLOAT] = batch[INPUT_FEATURES_JPEG_FLOAT].half()
if INPUT_TRUE_MODIFICATION_FLAG in batch:
y_trues = to_numpy(batch[INPUT_TRUE_MODIFICATION_FLAG]).flatten()
df[INPUT_TRUE_MODIFICATION_FLAG].extend(y_trues)
if INPUT_TRUE_MODIFICATION_TYPE in batch:
y_labels = to_numpy(batch[INPUT_TRUE_MODIFICATION_TYPE]).flatten()
df[INPUT_TRUE_MODIFICATION_TYPE].extend(y_labels)
image_ids = batch[INPUT_IMAGE_ID_KEY]
df[INPUT_IMAGE_ID_KEY].extend(image_ids)
outputs = model(**batch)
if OUTPUT_PRED_MODIFICATION_FLAG in outputs:
df[OUTPUT_PRED_MODIFICATION_FLAG].extend(to_numpy(outputs[OUTPUT_PRED_MODIFICATION_FLAG]).flatten())
if OUTPUT_PRED_MODIFICATION_TYPE in outputs:
df[OUTPUT_PRED_MODIFICATION_TYPE].extend(outputs[OUTPUT_PRED_MODIFICATION_TYPE].tolist())
if OUTPUT_PRED_EMBEDDING in outputs:
df[OUTPUT_PRED_EMBEDDING].extend(outputs[OUTPUT_PRED_EMBEDDING].tolist())
# Save also TTA predictions for future use
if OUTPUT_PRED_MODIFICATION_FLAG + "_tta" in outputs:
df[OUTPUT_PRED_MODIFICATION_FLAG + "_tta"].extend(
to_numpy(outputs[OUTPUT_PRED_MODIFICATION_FLAG + "_tta"]).tolist()
)
if OUTPUT_PRED_MODIFICATION_TYPE + "_tta" in outputs:
df[OUTPUT_PRED_MODIFICATION_TYPE + "_tta"].extend(
to_numpy(outputs[OUTPUT_PRED_MODIFICATION_TYPE + "_tta"]).tolist()
)
df = pd.DataFrame.from_dict(df)
return df
@torch.no_grad()
def main():
# Give no chance to randomness
torch.manual_seed(0)
np.random.seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
parser = argparse.ArgumentParser()
parser.add_argument("checkpoint", type=str, nargs="+")
parser.add_argument("-dd", "--data-dir", type=str, default=os.environ.get("KAGGLE_2020_ALASKA2"))
parser.add_argument("-b", "--batch-size", type=int, default=1)
parser.add_argument("-w", "--workers", type=int, default=0)
parser.add_argument("-d4", "--d4-tta", action="store_true")
parser.add_argument("-hv", "--hv-tta", action="store_true")
parser.add_argument("-f", "--force-recompute", action="store_true")
parser.add_argument("-fp16", "--fp16", action="store_true")
args = parser.parse_args()
checkpoint_fnames = args.checkpoint
data_dir = args.data_dir
batch_size = args.batch_size
workers = args.workers
fp16 = args.fp16
d4_tta = args.d4_tta
force_recompute = args.force_recompute
need_embedding = True
outputs = [OUTPUT_PRED_MODIFICATION_FLAG, OUTPUT_PRED_MODIFICATION_TYPE, OUTPUT_PRED_EMBEDDING]
embedding_suffix = "_w_emb" if need_embedding else ""
for checkpoint_fname in checkpoint_fnames:
model, checkpoints, required_features = ensemble_from_checkpoints(
[checkpoint_fname], strict=True, outputs=outputs, activation=None, tta=None, need_embedding=need_embedding
)
report_checkpoint(checkpoints[0])
model = model.cuda()
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
model = model.eval()
if fp16:
model = model.half()
train_ds = get_train_except_holdout(data_dir, features=required_features)
holdout_ds = get_holdout(data_dir, features=required_features)
test_ds = get_test_dataset(data_dir, features=required_features)
if d4_tta:
model = wrap_model_with_tta(model, "d4", inputs=required_features, outputs=outputs).eval()
tta_suffix = "_d4_tta"
else:
tta_suffix = ""
# Train
trn_predictions_csv = fs.change_extension(
checkpoint_fname, f"_train_predictions{embedding_suffix}{tta_suffix}.pkl"
)
if force_recompute or not os.path.exists(trn_predictions_csv):
trn_predictions = compute_trn_predictions(
model, train_ds, fp16=fp16, batch_size=batch_size, workers=workers
)
trn_predictions.to_pickle(trn_predictions_csv)
# Holdout
hld_predictions_csv = fs.change_extension(
checkpoint_fname, f"_holdout_predictions{embedding_suffix}{tta_suffix}.pkl"
)
if force_recompute or not os.path.exists(hld_predictions_csv):
hld_predictions = compute_trn_predictions(
model, holdout_ds, fp16=fp16, batch_size=batch_size, workers=workers
)
hld_predictions.to_pickle(hld_predictions_csv)
# Test
tst_predictions_csv = fs.change_extension(
checkpoint_fname, f"_test_predictions{embedding_suffix}{tta_suffix}.pkl"
)
if force_recompute or not os.path.exists(tst_predictions_csv):
tst_predictions = compute_trn_predictions(
model, test_ds, fp16=fp16, batch_size=batch_size, workers=workers
)
tst_predictions.to_pickle(tst_predictions_csv)
if __name__ == "__main__":
main()
| 1.976563 | 2 |
common/hil_slurm_client.py | mghpcc-projects/user_level_slurm_reservations | 0 | 12790445 | <filename>common/hil_slurm_client.py
"""
MassOpenCloud / Hardware Isolation Layer (MOC/HIL)
HIL Client Interface
August 2017, <NAME> <EMAIL>
"""
import urllib
import time
from hil.client.client import Client, RequestsHTTPClient
from hil.client.base import FailedAPICallException
from hil_slurm_logging import log_info, log_debug, log_error
from hil_slurm_settings import HIL_ENDPOINT, HIL_USER, HIL_PW
# timeout ensures that networking actions are completed in a reasonable time.
HIL_TIMEOUT = 20
DEBUG = False
class HILClientFailure(Exception):
"""Exception indicating that the HIL client failed"""
class ProjectMismatchError(Exception):
"""Raised when projects don't match"""
def _hil_client_connect(endpoint_ip, name, pw):
'''
Connect to the HIL server and return a HIL Client instance
Note this call will succeed if the API server is running, but the network server is down '''
hil_http_client = RequestsHTTPClient()
if not hil_http_client:
log_error('Unable to create HIL HTTP Client')
return None
hil_http_client.auth = (name, pw)
c = Client(endpoint_ip, hil_http_client)
if not c:
log_error('Unable to create HIL client')
return c
def hil_init():
return _hil_client_connect(HIL_ENDPOINT, HIL_USER, HIL_PW)
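# Illustrative usage (node and project names are hypothetical):
#   client = hil_init(); hil_reserve_nodes(['node-01'], 'slurm', client)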
def check_hil_interface():
    hil_client = hil_init()
    return hil_client is not None
def hil_reserve_nodes(nodelist, from_project, hil_client=None):
'''
Cause HIL nodes to move from the 'from' project to the HIL free pool.
Typically, the 'from' project is the Slurm loaner project.
This methods first powers off the nodes, then disconnects all networks,
then moves the node from the 'from' project to the free pool.
We power off the nodes before removing the networks because the IPMI
network is also controlled by HIL. If we removed all networks, then we will
not be able to perform any IPMI operations on nodes.
'''
if not hil_client:
hil_client = hil_init()
# Get information from node and ensure that the node is actually connected
# to <from_project> before proceeding.
# iterate over a copy of nodelist, otherwise we can't modify it.
for node in nodelist[:]:
node_info = show_node(hil_client, node)
project = node_info['project']
# if node already in the free pool, skip any processing.
if project is None:
log_info('HIL release: Node `%s` already in the free pool, skipping' % node)
nodelist.remove(node)
elif (project != from_project):
log_error('HIL reservation failure: Node `%s` (in project `%s`) not in `%s` project' % (node, project, from_project))
raise ProjectMismatchError()
# Power off all nodes.
for node in nodelist:
power_off_node(hil_client, node)
# Remove all networks from nodes.
for node in nodelist:
try:
_remove_all_networks(hil_client, node)
except:
log_error('Failed to remove networks from node %s' % node)
continue
# Finally, remove node from project.
for node in nodelist:
try:
_ensure_no_networks(hil_client, node)
except:
log_error('Failed to ensure node %s is disconnected from all networks' % node)
continue
# tries 10 times to detach the project because there might be a pending
# networking action setup by revert port in the previous step.
counter = 10
while counter:
try:
hil_client.project.detach(from_project, node)
log_info('Node `%s` removed from project `%s`' % (node, from_project))
break
except FailedAPICallException as ex:
if ex.message == 'Node has pending network actions':
counter -= 1
time.sleep(0.5)
else:
log_error('HIL reservation failure: Unable to detach node `%s` from project `%s`' % (node, from_project))
raise HILClientFailure(ex.message)
if counter == 0:
log_error('HIL reservation failure: Unable to detach node `%s` from project `%s`' % (node, from_project))
raise HILClientFailure()
def hil_free_nodes(nodelist, to_project, hil_client=None):
'''
    Cause HIL nodes to move from the HIL free pool to the 'to' project.
    Typically, the 'to' project is the Slurm loaner project.
    This method connects each node in the free pool to the 'to' project;
    nodes that are already in the 'to' project are skipped.
'''
if not hil_client:
hil_client = hil_init()
    # Get information from each node and skip any that are already connected
    # to <to_project>.
# iterate over a copy of nodelist, otherwise we can't modify it.
for node in nodelist[:]:
node_info = show_node(hil_client, node)
# If the node is in the Slurm project now, skip further processing, but don't indicate
# failure.
project = node_info['project']
if (project == to_project):
log_info('HIL release: Node `%s` already in `%s` project, skipping' % (node, to_project))
nodelist.remove(node)
# Finally, connect node to <to_project>
for node in nodelist:
try:
hil_client.project.connect(to_project, node)
log_info('Node `%s` connected to project `%s`' % (node, to_project))
        except (FailedAPICallException, ConnectionError):
            log_error('HIL release failure: Unable to connect node `%s` to project `%s`' % (node, to_project))
raise HILClientFailure()
def _remove_all_networks(hil_client, node):
'''
Disconnect all networks from all of the node's NICs
'''
node_info = show_node(hil_client, node)
# get node information and then iterate on the nics
for nic in node_info['nics']:
# get the port and switch to which the nics are connected to
port = nic['port']
switch = nic['switch']
if port and switch:
try:
hil_client.port.port_revert(switch, port)
log_info('Removed all networks from node `%s`' % node)
            except (FailedAPICallException, ConnectionError):
log_error('Failed to revert port `%s` on node `%s` switch `%s`' % (port, node, switch))
raise HILClientFailure()
def _ensure_no_networks(hil_client, node):
"""Polls on the output of show node to check if networks have been removed.
It will timeout and raise an exception if it's taking too long.
"""
connected_to_network = True
end_time = time.time() + HIL_TIMEOUT
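    # Re-query the node on every pass; a NIC that still lists attached networks keeps the poll going.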
while connected_to_network:
if time.time() > end_time:
raise HILClientFailure('Networks not removed from node in reasonable time')
node_info = show_node(hil_client, node)
for nic in node_info['nics']:
if nic['networks']:
connected_to_network = True
break
else:
connected_to_network = False
# don't tight loop.
time.sleep(0.5)
return
def show_node(hil_client, node):
"""Returns node information and takes care of handling exceptions"""
try:
node_info = hil_client.node.show(node)
return node_info
    except (FailedAPICallException, ConnectionError):
# log a note for the admins, and the exact exception before raising
# an error.
log_error('HIL reservation failure: HIL node info unavailable, node `%s`' % node)
raise HILClientFailure()
def power_off_node(hil_client, node):
try:
hil_client.node.power_off(node)
        log_info('Node `%s` successfully powered off' % node)
    except (FailedAPICallException, ConnectionError):
log_error('HIL reservation failure: Unable to power off node `%s`' % node)
raise HILClientFailure()
| 2.40625 | 2 |
terra_sdk/core/auth/data/account.py | terra-money/terra.py | 66 | 12790446 | from abc import ABC, abstractmethod
from terra_sdk.core.public_key import PublicKey
from terra_sdk.util.json import JSONSerializable
from .base_account import BaseAccount
from .continuous_vesting_account import ContinuousVestingAccount
from .delayed_vesting_account import DelayedVestingAccount
from .periodic_vesting_account import PeriodicVestingAccount
class Account(JSONSerializable, ABC):
@abstractmethod
def get_account_number(self) -> int:
pass
@abstractmethod
def get_sequence(self) -> int:
pass
@abstractmethod
def get_public_key(self) -> PublicKey:
pass
@classmethod
def from_amino(cls, amino: dict): # -> Account:
if amino["type"] == BaseAccount.type_amino:
return BaseAccount.from_amino(amino)
elif amino["type"] == ContinuousVestingAccount.type_amino:
return ContinuousVestingAccount.from_amino(amino)
elif amino["type"] == DelayedVestingAccount.type_amino:
return DelayedVestingAccount.from_amino(amino)
elif amino["type"] == PeriodicVestingAccount.type_amino:
return PeriodicVestingAccount.from_amino(amino)
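        # Unrecognized account types fall through and return None.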
@classmethod
def from_data(cls, data: dict): # -> Account:
if data["@type"] == BaseAccount.type_url:
return BaseAccount.from_data(data)
elif data["@type"] == ContinuousVestingAccount.type_url:
return ContinuousVestingAccount.from_data(data)
elif data["@type"] == DelayedVestingAccount.type_url:
return DelayedVestingAccount.from_data(data)
elif data["@type"] == PeriodicVestingAccount.type_url:
return PeriodicVestingAccount.from_data(data)
| 2.421875 | 2 |
Course/models.py | Viet782000/CourseAPITest | 0 | 12790447 | from django.db import models
class Course(models.Model):
created = models.DateTimeField(auto_now_add=True)
title = models.CharField(max_length=100, blank=True, default='')
content = models.TextField()
owner = models.ForeignKey('auth.User', related_name='Course', on_delete=models.CASCADE)
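    # Deleting the owning user also deletes their courses (on_delete=CASCADE).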
class Meta:
ordering = ['created']
| 2.15625 | 2 |
mll_calc/htc_prep.py | opotowsky/like-me-fuel | 0 | 12790448 | #! /usr/bin/env python3
import sys
import csv
import argparse
import numpy as np
import pandas as pd
from mll_calc.all_jobs import parent_jobs, kid_jobs
def row_calcs(ext_test):
if 'no' in ext_test:
#db_rows = 450240
#max_jobs = 9750
db_rows = 90048 * 4
max_jobs = 978 * 4
else:
db_rows = 505
max_jobs = 10
n_rows = db_rows // max_jobs
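    # Split the database rows into chunks of n_rows (roughly max_jobs chunks); the final chunk absorbs the remainder.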
init_rows = np.arange(0, db_rows, n_rows).tolist()
end_rows = init_rows[1:]
# TODO took out +1 below because had index 1 too high last time
end_rows.append(db_rows)
################################################
################ In-script test ################
################################################
if db_rows % n_rows == 0:
total_jobs = db_rows // n_rows
else:
total_jobs = db_rows // n_rows + 1
if len(init_rows) != total_jobs or len(end_rows) != total_jobs:
print(total_jobs, len(init_rows), len(end_rows))
sys.exit('total expected jobs does not equal one of db_row lists')
################################################
return init_rows, end_rows
def make_paramstxt(parent_job, kid_jobs):
parent_dir = parent_job['parent_dir']
fname = parent_dir + '_params.txt'
init_rows, end_rows = row_calcs(parent_job['ext_test'])
for unc_num, (kid_dir, unc) in enumerate(zip(kid_jobs['job_dirs'], kid_jobs['uncs'])):
if parent_dir == 'train_nuc29':
fname = parent_dir + '_' + str(unc_num) + '_params.txt'
#with open(fname, 'w') as f:
with open(fname, 'a') as f:
w = csv.writer(f)
job_dir = parent_dir + '/' + kid_dir
for i in range(0, len(init_rows)):
job = [job_dir, unc,
parent_job['train_pkl'], parent_job['test_pkl'],
str(i).zfill(4), init_rows[i], end_rows[i],
parent_job['ext_test'], parent_job['ratios']
]
w.writerow(job)
return
def main():
"""
Reads all the job descriptions from all_jobs.py and populates the necessary
    *_params.txt parameter files
"""
for parent_job in parent_jobs:
make_paramstxt(parent_job, kid_jobs)
return
if __name__ == "__main__":
main()
| 2.703125 | 3 |
MTCNN/data_set/preprocess.py | gm19900510/License_Plate_Detection_Pytorch | 10 | 12790449 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 8 10:17:20 2019
The code is designed to split the data for train and validation
@author: xingyu
"""
from imutils import paths
import numpy as np
import cv2
import os
import argparse
import random
parser = argparse.ArgumentParser(description='split the CCPD images into train and validation sets')
parser.add_argument("-image", help='image path', default='../../ccpd/ccpd_dataset/ccpd_weather', type=str)
parser.add_argument("-dir_train", help='save directory', default='ccpd_train', type=str)
parser.add_argument("-dir_val", help='save directory', default='ccpd_val', type=str)
parser.add_argument("-size", help='the number of images to be saved', default=5000, type=int)
args = parser.parse_args()
img_paths = []
img_paths += [el for el in paths.list_images(args.image)]
random.shuffle(img_paths)
save_dir_train = args.dir_train
save_dir_val = args.dir_val
# Make sure the output directories exist; cv2.imwrite fails silently when the path is missing.
os.makedirs(save_dir_train, exist_ok=True)
os.makedirs(save_dir_val, exist_ok=True)
print('image data processing is kicked off...')
print("%d images in total" % len(img_paths))
idx = 0
idx_train = 0
idx_val = 0
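# Every 4th image (idx % 4 == 0) is written to the validation split; the rest go to training.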
for i in range(len(img_paths)):
filename = img_paths[i]
basename = os.path.basename(filename)
img = cv2.imread(filename)
idx += 1
if idx % 100 == 0:
print("%d images done" % idx)
if idx % 4 == 0:
save = save_dir_val+'/'+basename
cv2.imwrite(save, img)
idx_val += 1
else:
save = save_dir_train+'/'+basename
cv2.imwrite(save, img)
idx_train += 1
if idx == args.size:
break
print('image data processing done, write %d training images, %d val images' % (idx_train, idx_val))
| 2.59375 | 3 |
2015/21/rpg.py | lvaughn/advent | 0 | 12790450 | <filename>2015/21/rpg.py
#!/usr/bin/env python3
from collections import namedtuple
from itertools import combinations
Item = namedtuple('Item', ['name', 'cost', 'damage', 'armor'])
boss_hp = 109
boss_damage = 8
boss_armor = 2
weapons = [
Item('Dagger', 8, 4, 0),
Item('Shortsword', 10, 5, 0),
Item('Warhammer', 25, 6, 0),
Item('Longsword', 40, 7, 0),
Item('Greataxe', 74, 8, 0)
]
armors = [
    Item('None', 0, 0, 0),  # armor is optional, so include a zero-cost "no armor" option
    Item('Leather', 13, 0, 1),
    Item('Chainmail', 31, 0, 2),
    Item('Splintmail', 53, 0, 3),
    Item('Bandedmail', 75, 0, 4),
    Item('Platemail', 102, 0, 5),
]
rings = [
Item('Damage +1', 25, 1, 0),
Item('Damage +2', 50, 2, 0),
Item('Damage +3', 100, 3, 0),
Item('Defense +1', 20, 0, 1),
Item('Defense +2', 40, 0, 2),
Item('Defense +3', 80, 0, 3),
]
def winningCombo(armor, damage):
p_hp = 100
b_hp = boss_hp
turn = 0 # player is zero
while b_hp > 0 and p_hp > 0:
if turn == 0:
b_hp -= max(1, damage-boss_armor)
else:
p_hp -= max(1,boss_damage - armor)
turn = 1 - turn
return p_hp > 0
ring_combos = [[]]
ring_combos.extend(combinations(rings, 2))
ring_combos.extend(combinations(rings, 1))
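# Ring options: no rings, any single ring, or any pair of distinct rings.
# Part 1: find the cheapest loadout that still beats the boss.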
cheapest_win = 1000000
for r in ring_combos:
for w in weapons:
for a in armors:
cost = w.cost + a.cost
damage = w.damage + a.damage
armor = w.armor + a.armor
for ring in r:
cost += ring.cost
damage += ring.damage
armor += ring.armor
if cost < cheapest_win and winningCombo(armor, damage):
cheapest_win = cost
print("Best Combo", cost, [x.name for x in r], w.name, a.name)
expensive_win = -1
for r in ring_combos:
for w in weapons:
cost = w.cost
damage = w.damage
armor = w.armor
for ring in r:
cost += ring.cost
damage += ring.damage
armor += ring.armor
if cost > expensive_win and not winningCombo(armor, damage):
expensive_win = cost
print("Worst Combo (no armor)", cost, [x.name for x in r], w.name)
for a in armors:
cost = w.cost + a.cost
damage = w.damage + a.damage
armor = w.armor + a.armor
for ring in r:
cost += ring.cost
damage += ring.damage
armor += ring.armor
if cost > expensive_win and not winningCombo(armor, damage):
expensive_win = cost
print("Worst Combo", cost, [x.name for x in r], w.name, a.name)
| 3.0625 | 3 |