# --- StarcoderdataPython record 11236531 ---
# flake8: noqa: E128
"""
Add cascades
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2019-05-02 10:57:00.579594+00:00
"""
# Stdlib
import re
# External Libraries
from alembic import op
# revision identifiers, used by Alembic.
revision = "<KEY>"
down_revision = "<PASSWORD>"
branch_labels = None
depends_on = None
constraint_re = re.compile(
r"^fk_" # Start tag
r"(.*)_" # Table
r"((?:author|user|mod|review)(?:_id)?)_" # Column
r"(.*)$" # Foreign table
)
constraints = [
"fk_connection_user_user",
"fk_editors_choice_author_id_user",
"fk_editors_choice_mod_id_mod",
"fk_media_mod_id_mod",
"fk_mod_playtester_mod_id_mod",
"fk_mod_playtester_user_id_user",
"fk_report_author_id_user",
"fk_report_mod_id_mod",
"fk_review_author_id_user",
"fk_review_mod_id_mod",
"fk_review_reaction_review_id_review",
"fk_review_reaction_user_id_user",
"fk_user_favorite_user_id_user",
"fk_user_favorite_mod_id_mod",
"fk_user_mod_user_id_user",
"fk_user_mod_mod_id_mod",
]
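# Sanity-check sketch (illustrative, not part of the migration): constraint_re splits a
# constraint name of the form fk_<table>_<column>_<foreign_table>, e.g.
#   constraint_re.match("fk_editors_choice_author_id_user").groups()
#   -> ("editors_choice", "author_id", "user")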
def upgrade():
for constraint in constraints:
# Get all parts of constraint (table[_table part 2]_column[_column part 2]_foreign_table)
table, column, foreign = constraint_re.match(constraint).groups()
op.drop_constraint(constraint, table, type_="foreignkey")
op.create_foreign_key(
op.f(constraint), table, foreign, [column], ["id"], ondelete="CASCADE"
)
def downgrade():
for constraint in constraints:
# Get all parts of constraint (table[_table part 2]_column[_column part 2]_foreign_table)
table, column, foreign = constraint_re.match(constraint).groups()
op.drop_constraint(op.f(constraint), table, type_="foreignkey")
op.create_foreign_key(constraint, table, foreign, [column], ["id"])
# --- StarcoderdataPython record 6608349 ---
<gh_stars>10-100
from __future__ import print_function
import numpy
import theano
from theano import gof
class AXPBOp(gof.Op):
"""
This creates an Op that takes x to a*x+b.
"""
__props__ = ("a", "b")
def __init__(self, a, b):
self.a = a
self.b = b
super(AXPBOp, self).__init__()
def make_node(self, x):
x = theano.tensor.as_tensor_variable(x)
return theano.Apply(self, [x], [x.type()])
def c_code_cache_version(self):
return (6, 1)
def c_support_code(self):
c_support_code = """
bool same_shape(PyArrayObject* arr1, PyArrayObject* arr2)
{
if( PyArray_NDIM(arr1) != PyArray_NDIM(arr2)) {
return false;
}
for(int i = 0; i < PyArray_NDIM(arr2) ; i++) {
                if (PyArray_DIMS(arr1)[i] != PyArray_DIMS(arr2)[i]) {  /* differing extent on axis i */
return false;
}
}
return true;
}
"""
return c_support_code
def c_support_code_apply(self, node, name):
dtype_x = node.inputs[0].dtype
dtype_z = node.outputs[0].dtype
a = self.a
b = self.b
c_support_code = """
void elemwise_op_%(name)s(npy_%(dtype_x)s* x_ptr, npy_intp* x_str, int itemsize_x,
npy_%(dtype_z)s* z_ptr, npy_intp* z_str, int itemsize_z,
int nbDims, npy_intp* dims)
{
npy_intp stride_x = (npy_intp)(1);
npy_intp stride_z = (npy_intp)(1);
for (int i = 0; i < nbDims; i ++) {
stride_x = stride_x * x_str[i] / itemsize_x;
stride_z = stride_z * z_str[i] / itemsize_z;
}
for (int i=0; i < dims[0]; i++)
if (nbDims==1) {
z_ptr[i * z_str[0]/itemsize_z] = x_ptr[i * x_str[0] / itemsize_x] * ((npy_%(dtype_z)s) %(a)s) + ((npy_%(dtype_z)s)%(b)s);
} else {
elemwise_op_%(name)s( x_ptr + i * stride_x , x_str + 1, itemsize_x,
z_ptr + i * stride_z , z_str + 1, itemsize_z,
nbDims - 1, dims + 1 );
}
}
"""
return c_support_code % locals()
def c_code(self, node, name, inp, out, sub):
x = inp[0]
z = out[0]
dtype_x = node.inputs[0].dtype
dtype_z = node.outputs[0].dtype
itemsize_x = numpy.dtype(dtype_x).itemsize
itemsize_z = numpy.dtype(dtype_z).itemsize
typenum_z = numpy.dtype(dtype_z).num
fail = sub['fail']
c_code = """
// Validate that the output storage exists and has the same
// dimension as x.
if (NULL == %(z)s || !(same_shape(%(x)s, %(z)s)))
{
/* Reference received to invalid output variable.
Decrease received reference's ref count and allocate new
output variable */
Py_XDECREF(%(z)s);
%(z)s = (PyArrayObject*)PyArray_EMPTY(PyArray_NDIM(%(x)s),
PyArray_DIMS(%(x)s),
%(typenum_z)s,
0);
if (!%(z)s) {
%(fail)s;
}
}
// Perform the elemwise operation
((npy_%(dtype_z)s *)PyArray_DATA(%(z)s))[0] = 0;
elemwise_op_%(name)s((npy_%(dtype_x)s*)PyArray_DATA(%(x)s), PyArray_STRIDES(%(x)s), %(itemsize_x)s,
(npy_%(dtype_z)s*)PyArray_DATA(%(z)s), PyArray_STRIDES(%(z)s), %(itemsize_z)s,
PyArray_NDIM(%(x)s), PyArray_DIMS(%(x)s) );
"""
return c_code % locals()
mult4plus5op = AXPBOp(4,5)
x = theano.tensor.matrix()
y = mult4plus5op( x )
print(y)
theano.printing.pprint(y)
theano.printing.debugprint(y)
theano.printing.pydotprint(y)
print("Compiling")
f = theano.function([x], y)
theano.printing.debugprint(f)
print("Eval")
ind = numpy.random.rand(3,2).astype('float32')
print("Equality", numpy.allclose(f(ind), 4 * ind + 5 ))
print(mult4plus5op)
# --- StarcoderdataPython record 3536090 ---
<reponame>weihsuanchou/algorithm_py<gh_stars>0
# i is the write index into nums and is advanced manually
from typing import List
class Solution:
def removeElement(self, nums: List[int], val: int) -> int:
if nums is None or nums == []: return 0
i = 0
for j in range(len(nums)):
if nums[j] != val:
                # bring each remaining non-val element forward to the write index
nums[i] = nums[j]
i += 1
return i
def main():
print( "hello")
s = Solution()
print("output: ", s.removeElement( [0,1,2,2,3,0,4,2], 2))
if __name__ == "__main__":
main()
# --- StarcoderdataPython record 3330457 ---
"""Module of the routes of the Auth blueprint
"""
from flask import render_template, request, url_for, redirect, flash
from flask_login import login_user, current_user, login_required, logout_user
from app.auth import auth
from app.auth.forms import LoginForm
from app.models.administrator import Administrator
@auth.route('/login/', methods=['GET', 'POST'])
def login():
"""View of login to the backstage"""
if current_user.is_authenticated:
return redirect(url_for('admin.index'))
form = LoginForm()
if form.validate_on_submit():
admin = Administrator.query \
.filter_by(name=form.name.data).first()
if admin is not None and admin.verify_password(form.password.data):
login_user(admin, form.remember_me.data)
redirect_url = request.args.get('next')
if redirect_url is None or not redirect_url.startswith('/'):
redirect_url = url_for('admin.index')
return redirect(redirect_url)
flash('Invalid administrator name or password')
return render_template('/auth/login.html', form=form)
@auth.route('/logout/')
@login_required
def logout():
"""View of logout"""
logout_user()
flash('Administrator Logged out')
return redirect(url_for('main.index'))
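# Hedged sketch of the imported LoginForm (app/auth/forms.py is not shown here); the
# field names are inferred from the view above, and Flask-WTF/WTForms is an assumption:
#
#   from flask_wtf import FlaskForm
#   from wtforms import StringField, PasswordField, BooleanField, SubmitField
#   from wtforms.validators import DataRequired
#
#   class LoginForm(FlaskForm):
#       name = StringField('Name', validators=[DataRequired()])
#       password = PasswordField('Password', validators=[DataRequired()])
#       remember_me = BooleanField('Keep me logged in')
#       submit = SubmitField('Log in')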
# --- StarcoderdataPython record 9663353 ---
<reponame>adfoucart/deephisto
from .PAN import PAN
from .ShortRes import ShortRes
from .UNet import UNet
from .BaseNetwork import BaseNetwork
# --- StarcoderdataPython record 11294534 ---
""" Blind SQL Injection (BSQLI) PayloadGroup type """
#FIXME: how does this handle add_payload? see PNKTHR-43
from massweb.payloads.bsqli_payload import BSQLIPayload
from massweb.payloads.payload_group import PayloadGroup
class BSQLIPayloadGroup(PayloadGroup):
""" Blind SQL Injection Payload type:
contains multiple pairs of BSQLIPayloads"""
def __init__(self, true_payload, false_payload):
""" Initialize this BSQLIPayloadGroup.
true_payload BSQLIPayload for the true SQL statement
false_payload BSQLIPayload for the false SQL statement
"""
        if not (isinstance(true_payload, BSQLIPayload)
                and isinstance(false_payload, BSQLIPayload)):
            raise TypeError("input payloads must be of type BSQLIPayload")
        if not (true_payload.payload_attributes["truth"] is True
                and false_payload.payload_attributes["truth"] is False):
            raise ValueError("true_payload must have a truth attribute of True "
                             "and false_payload must have a truth attribute of False")
        self.true_payload = true_payload
        self.false_payload = false_payload
"""
payload_false = BSQLIPayload("dddd", {"truth" : False})
payload_true = BSQLIPayload("ddd333d", {"truth" : True})
bspg = BSQLIPayloadGroup(payload_true, payload_false)
"""
# --- StarcoderdataPython record 6625769 ---
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""ROS2 Robot Simulation Launch File.
This script simulates a robot in Gazebo simulation.
Revision History:
2021-10-23 (Animesh): Baseline Software.
Example:
$ colcon build && source install/setup.bash && ros2 launch ros2_robot_simulation launch.py
$ source install/setup.bash && ros2 launch ros2_robot_simulation launch.py
$ ros2 launch ros2_robot_simulation launch.py
"""
#___Import Modules:
import os
from ament_index_python.packages import get_package_share_directory
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument, IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch_ros.actions import Node
#___Function:
def generate_launch_description():
# Get the package directory
ros2_world_simulation_dir = get_package_share_directory('ros2_world_simulation')
ros2_robot_simulation_dir = get_package_share_directory('ros2_robot_simulation')
# Create launch configuration variables
use_simulator = LaunchConfiguration('use_simulator')
headless = LaunchConfiguration('headless')
world = LaunchConfiguration('world')
x_pos = LaunchConfiguration('x_pos')
y_pos = LaunchConfiguration('y_pos')
z_pos = LaunchConfiguration('z_pos')
roll = LaunchConfiguration('roll')
pitch = LaunchConfiguration('pitch')
yaw = LaunchConfiguration('yaw')
urdf_file = LaunchConfiguration('urdf_file')
# Declare the launch arguments
declare_use_simulator_cmd = DeclareLaunchArgument(
'use_simulator',
default_value='True',
description='Whether to start the simulator')
declare_simulator_cmd = DeclareLaunchArgument(
'headless',
default_value='False',
        description='Whether to execute gzclient')
declare_world_cmd = DeclareLaunchArgument(
'world',
default_value=os.path.join(ros2_world_simulation_dir, 'worlds', 'empty.world'),
description='Full path to world model file to load')
declare_x_pos_cmd = DeclareLaunchArgument(
'x_pos',
default_value='0.0')
declare_y_pos_cmd = DeclareLaunchArgument(
'y_pos',
default_value='0.0')
declare_z_pos_cmd = DeclareLaunchArgument(
'z_pos',
default_value='0.0')
declare_roll_cmd = DeclareLaunchArgument(
'roll',
default_value='0.0')
declare_pitch_cmd = DeclareLaunchArgument(
'pitch',
default_value='0.0')
declare_yaw_cmd = DeclareLaunchArgument(
'yaw',
default_value='0.0')
declare_urdf_file_cmd = DeclareLaunchArgument(
'urdf_file',
default_value='jetbot.urdf')
# Specify the actions
world_launch_cmd = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(ros2_world_simulation_dir, 'launch', 'launch.py')),
launch_arguments={'use_simulator': use_simulator,
'headless': headless,
'world': world}.items())
spawn_robot_cmd = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(ros2_robot_simulation_dir, 'launch', 'spawn.py')),
launch_arguments={'x_pos': x_pos,
'y_pos': y_pos,
'z_pos': z_pos,
'roll': roll,
'pitch': pitch,
'yaw': yaw,
'urdf': urdf_file}.items())
robot_states_cmd = IncludeLaunchDescription(
PythonLaunchDescriptionSource(os.path.join(ros2_robot_simulation_dir, 'launch', 'states.py')),
launch_arguments={'urdf': urdf_file,}.items())
# Create the launch description and populate
ld = LaunchDescription()
# Declare the launch options
ld.add_action(declare_use_simulator_cmd)
ld.add_action(declare_simulator_cmd)
ld.add_action(declare_world_cmd)
ld.add_action(declare_x_pos_cmd)
ld.add_action(declare_y_pos_cmd)
ld.add_action(declare_z_pos_cmd)
ld.add_action(declare_roll_cmd)
ld.add_action(declare_pitch_cmd)
ld.add_action(declare_yaw_cmd)
ld.add_action(declare_urdf_file_cmd)
# Add all actions
ld.add_action(world_launch_cmd)
ld.add_action(spawn_robot_cmd)
ld.add_action(robot_states_cmd)
return ld
#
# end of file
"""ANI717"""
# --- StarcoderdataPython record 6580933 ---
<filename>fas2ipa/cli.py<gh_stars>0
from urllib.parse import parse_qs, urlencode
import click
import vcr
from python_freeipa import ClientLegacy as Client
from fedora.client.fas2 import AccountSystem
from .config import get_config
from .statistics import Stats
from .users import Users
from .groups import Groups
from .agreements import Agreements
class FASWrapper:
_remove_from_request_body = ("_csrf_token", "user_name", "password", "login")
def __init__(self, config):
self.fas = AccountSystem(
config["fas"]["url"],
username=config["fas"]["username"],
password=config["fas"]["password"],
)
self._replay = config["replay"]
self._recorder = vcr.VCR(
ignore_hosts=config["ipa"]["instances"],
record_mode="new_episodes",
filter_post_data_parameters=self._remove_from_request_body,
)
self._recorder.register_matcher("fas2ipa", self._vcr_match_request)
def _vcr_match_request(self, r1, r2):
assert r1.query == r2.query
body1 = parse_qs(r1.body)
body2 = parse_qs(r2.body)
for param in self._remove_from_request_body:
for body in (body1, body2):
try:
del body[param]
except KeyError:
pass
assert body1 == body2
def _vcr_get_cassette_path(self, url, *args, **kwargs):
params = kwargs.get("req_params", {})
cassette_path = [
"fixtures/fas-",
url[1:].replace("/", "_"),
".yaml",
]
if params:
cassette_path[2:2] = [
"-",
urlencode(params, doseq=True),
]
return "".join(cassette_path)
def send_request(self, url, *args, **kwargs):
if not self._replay:
return self.fas.send_request(url, *args, **kwargs)
cassette_path = self._vcr_get_cassette_path(url, *args, **kwargs)
with self._recorder.use_cassette(cassette_path, match_on=["fas2ipa"]):
return self.fas.send_request(url, *args, **kwargs)
@click.command()
@click.option("--skip-groups", is_flag=True, help="Skip group creation")
@click.option(
"--skip-user-add", is_flag=True, help="Don't add or update users",
)
@click.option(
"--skip-user-membership", is_flag=True, help="Don't add users to groups",
)
@click.option(
"--skip-user-signature",
is_flag=True,
help="Don't store users signatures of agreements",
)
@click.option("--users-start-at", help="Start migrating users at that letter")
def cli(
skip_groups,
skip_user_add,
skip_user_membership,
skip_user_signature,
users_start_at,
):
config = get_config()
config["skip_groups"] = skip_groups
config["skip_user_add"] = skip_user_add
config["skip_user_membership"] = skip_user_membership
config["skip_user_signature"] = skip_user_signature
fas = FASWrapper(config)
click.echo("Logged into FAS")
instances = []
for instance in config["ipa"]["instances"]:
ipa = Client(host=instance, verify_ssl=config["ipa"]["cert_path"])
ipa.login(config["ipa"]["username"], config["ipa"]["password"])
instances.append(ipa)
click.echo("Logged into IPA")
stats = Stats()
agreements = Agreements(config, instances, fas)
if config.get("agreement"):
agreements.create()
groups = Groups(config, instances, fas, agreements=agreements)
groups_stats = groups.migrate_groups()
stats.update(groups_stats)
users = Users(config, instances, fas, agreements=agreements)
users_stats = users.migrate_users(users_start_at=users_start_at)
stats.update(users_stats)
stats.print()
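# Usage sketch (assumes this cli() is exposed as a console script, e.g. "fas2ipa";
# the flag names are the ones declared above):
#   fas2ipa --skip-groups --skip-user-signature --users-start-at m
# Connection settings (FAS/IPA credentials, replay mode) come from get_config().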
# --- StarcoderdataPython record 5071326 ---
<filename>pd-service-ep-toggle.py
#!/usr/local/bin/python
import requests
import json
apiKey = 'YOURAPIKEYGOESHERE' # api key MUST have write access
serviceId = 'PR8MOOH'
weekdayEpId = 'P5IB0SR'
weekendEpId = 'PDJMLYV'
url = "https://api.pagerduty.com/services/" + serviceId
headers = {
'Authorization': 'Token token=' + apiKey,
'Accept': 'application/vnd.pagerduty+json;version=2',
'Content-Type': 'application/json'
}
response = requests.request("GET", url, headers=headers)
serviceDetails = response.json()['service']
currentEp = serviceDetails['escalation_policy']['id']
if currentEp == weekdayEpId:
newEp = weekendEpId
else:
newEp = weekdayEpId
print("\n")
print("Service Name: " + serviceDetails['name'] + " (" + serviceId + ")")
print("----------------------------------------------")
print("🔴 Previous EP: " + currentEp)
payload = json.dumps({
"service": {
"type": "service",
"escalation_policy": {
"id": newEp,
"type": "escalation_policy_reference"
}
}
})
requests.request("PUT", url, headers=headers, data=payload)
print("🟢 Updated EP: " + newEp)
print("\n")
# --- StarcoderdataPython record 4952788 ---
<reponame>IndustryApps/flask-eureka
# coding: utf-8
from flask_eureka.eureka import Eureka, eureka_bp
# --- StarcoderdataPython record 3581597 ---
#Copyright (C) 2019. Huawei Technologies Co., Ltd. All rights reserved.
#This program is free software; you can redistribute it and/or modify it under the terms of the BSD 3-Clause License.
#This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the BSD 3-Clause License for more details.
import os
cpu_num = 4
import resnet
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
# from torch.autograd import Variable
from resnet import ResNet18,ResNet34
from torchvision.datasets import CIFAR100,ImageFolder,CIFAR10
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from timm.models import create_model
import vision_transformer
from loss import kdloss, csloss, patch_attention_probe_loss, robust_kdloss
from utils import accuracy, AverageMeter
from functools import partial
import random
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pdb
import numpy as np
import warnings
os.environ['OMP_NUM_THREADS'] = str(cpu_num)
os.environ['OPENBLAS_NUM_THREADS'] = str(cpu_num)
os.environ['MKL_NUM_THREADS'] = str(cpu_num)
os.environ['VECLIB_MAXIMUM_THREADS'] = str(cpu_num)
os.environ['NUMEXPR_NUM_THREADS'] = str(cpu_num)
torch.set_num_threads(cpu_num)
warnings.filterwarnings('ignore')
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--num_select', type=int, default=600000)
parser.add_argument('--data_cifar', type=str, default='/home/wjh19/database/cifar10/')
parser.add_argument('--data_imagenet', type=str, default='/home/wjh19/database/imagenet/train/')
parser.add_argument('--teacher', default='deit_base_patch4_32_teacher', type=str, metavar='MODEL',
                    help='Name of teacher model to distill from (default: "deit_base_patch4_32_teacher")')
parser.add_argument('--teacher_dir', type=str, default='/home/wjh19/mage/DFND_DeiT/output/cifar10/teacher/checkpoint.pth')
parser.add_argument('--nb_classes', type=int, default=10, help='number of classes')
parser.add_argument('--lr_S', type=float, default=7.5e-4, help='learning rate')
parser.add_argument('--robust', action='store_true', default=False,
help='Robust distillation enabled (if avail)')
parser.add_argument('--attnprobe_sel', action='store_true', default=False,
help='Distillation by attention prime enabled (if avail)')
parser.add_argument('--random', action='store_true', default=False,
help='Randomly select wild data (if avail)')
parser.add_argument('--attnprobe_dist', action='store_true', default=False,
help='Distillation by attention prime enabled (if avail)')
parser.add_argument('--attnlier', type=float, default=0.05, help='weight of attention layer to sample the wild data')
parser.add_argument('--outlier', type=float, default=0.9, help='weight of output layer to sample the wild data')
parser.add_argument('--patchattn', type=float, default=0.8, help='weight of patch attention loss')
parser.add_argument('--pos_num', type=int, default=129)
parser.add_argument('--dataset', type=str, default='cifar10', choices=['cifar10','cifar100','imagenet','mnist'])
parser.add_argument('--epochs', type=float, default=800)
parser.add_argument('--output_dir', type=str, default='/home/wjh19/mage/DFND_DeiT/output/cifar10/')
parser.add_argument('--selected_file', type=str, default='/home/wjh19/mage/DFND_DeiT/selected/cifar10/')
parser.add_argument('--schedule', default=[200, 300], nargs='*', type=int,
help='learning rate schedule (when to drop lr by 10x)')
parser.add_argument('--cos', action='store_true',
help='use cosine lr schedule')
args,_ = parser.parse_known_args()
acc = 0
acc_best = 0
epoch_best = 0
teacher = None
assert args.teacher_dir, 'need to specify teacher-path when using distillation'
print(f"Creating teacher model: {args.teacher}")
teacher = create_model(
args.teacher,
pretrained=False,
num_classes=args.nb_classes,
)
if args.dataset == 'imagenet':
embed_dim = 768
num_heads = 12
img_size = 224
else:
embed_dim = 384
num_heads = 3
img_size = 32
checkpoint = torch.load(args.teacher_dir, map_location='cpu')
teacher.load_state_dict(checkpoint['model'])
teacher.cuda()
teacher.eval()
teacher = nn.DataParallel(teacher)
# teacher = torch.load(args.teacher_dir + 'teacher').cuda()
# teacher.eval()
for parameter in teacher.parameters():
parameter.requires_grad = False
def get_class_weight(model, dataloader, num_classes=10, T=1):
classes_outputs = np.zeros(num_classes)
model.eval()
if os.path.exists(args.selected_file + 'class_weights.pth'):
class_weights = torch.load(args.selected_file + 'class_weights.pth')
else:
for i,(inputs, labels) in enumerate(dataloader):
inputs = inputs.cuda()
with torch.set_grad_enabled(False):
outputs, output_feature_t = model(inputs)
outputs = F.softmax(outputs/T, dim=1)
for j in range(inputs.shape[0]):
classes_outputs += outputs[j].cpu().data.numpy()
class_weights = 1/classes_outputs
weights_sum = np.sum(class_weights)
class_weights /= weights_sum
class_weights *= num_classes
torch.save(class_weights, args.selected_file + 'class_weights.pth')
return class_weights
def perturb(weight, epsilon=0.1, perturb_num=1):
weights = []
weights.append(weight)
for i in range(perturb_num):
p = np.random.rand(weight.shape[0]) * epsilon
weight_new = weight + p
weights.append(weight_new)
return weights
normalize = transforms.Normalize(mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010])
data_train = ImageFolder(args.data_imagenet, transforms.Compose([
transforms.Resize((img_size,img_size)),
transforms.ToTensor(),
normalize,
]))
data_train_transform = ImageFolder(args.data_imagenet, transforms.Compose([
transforms.Resize((img_size,img_size)),
transforms.RandomCrop(img_size, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
transform_train = transforms.Compose([
transforms.RandomCrop(img_size, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_train_imagenet = transforms.Compose([
transforms.Resize(224),
transforms.RandomCrop(224, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
transform_test_imagenet = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])
if args.dataset == 'cifar100':
data_test = CIFAR100(args.data_cifar,
train=False,
transform=transform_test)
teacher_acc = torch.tensor([0.7630])
n_classes = 100
if args.dataset == 'cifar10':
data_test = CIFAR10(args.data_cifar,
train=False,
transform=transform_test)
teacher_acc = torch.tensor([0.9665])
n_classes = 10
if args.dataset == 'imagenet':
label_trainset = ImageFolder(os.path.join(args.data_cifar, 'train'),
transform_train_imagenet)
data_test = ImageFolder(os.path.join(args.data_cifar, 'val'),
transform_test_imagenet)
teacher_acc = torch.tensor([0.8118])
n_classes = 1000
if os.path.exists(args.selected_file + 'labelset_index_dict.pth'):
labelset_index = torch.load(args.selected_file + 'labelset_index_dict.pth')['positive_index']
else:
labelset_index = []
classnum = np.ones(n_classes) * args.pos_num
i = 0
sample_idx = 0
while(np.sum(classnum) > 0):
image = label_trainset[i][0]
label = label_trainset[i][1]
if(classnum[label] > 0):
labelset_index.append(i)
classnum[label] -= 1
sample_idx += 1
i += 1
print('Sample %d from the original dataset.' % sample_idx)
labelset_index_dict = {}
labelset_index_dict['positive_index'] = labelset_index
torch.save(labelset_index_dict, args.selected_file + 'labelset_index_dict.pth')
print("Positive data has been sampled from the original dataset!")
label_train_subset = torch.utils.data.Subset(label_trainset, labelset_index)
data_test_loader = DataLoader(data_test, batch_size=1000, num_workers=0)
noise_adaptation = torch.nn.Parameter(torch.zeros(n_classes,n_classes-1))
def noisy(noise_adaptation):
# noise_adaptation_softmax: (n_classes,n_classes-1)
noise_adaptation_softmax = torch.nn.functional.softmax(noise_adaptation,dim=1) * (1 - teacher_acc)
# noise_adaptation_layer: (n_classes,n_classes)
noise_adaptation_layer = torch.zeros(n_classes,n_classes)
for i in range(n_classes):
        if i == 0:
            noise_adaptation_layer[i] = torch.cat([teacher_acc, noise_adaptation_softmax[i][i:]])
        elif i == n_classes - 1:
            noise_adaptation_layer[i] = torch.cat([noise_adaptation_softmax[i][:i], teacher_acc])
        else:
            noise_adaptation_layer[i] = torch.cat([noise_adaptation_softmax[i][:i], teacher_acc, noise_adaptation_softmax[i][i:]])
# noise_adaptation_layer: (n_classes,n_classes)
return noise_adaptation_layer.cuda()
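# Worked example (illustrative numbers): with n_classes = 3 and teacher_acc = 0.9, each
# row of noise_adaptation_softmax sums to 0.1 and noisy() places teacher_acc on the
# diagonal, e.g.
#   [[0.90, 0.06, 0.04],
#    [0.03, 0.90, 0.07],
#    [0.05, 0.05, 0.90]]
# so every row of the adaptation matrix sums to 1 and models the teacher's label noise.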
# net = ResNet18(n_classes).cuda()
if args.dataset == 'imagenet':
print("Creating student model: deiT_tiny_patch16_224")
net = vision_transformer.TeacherVisionTransformer(img_size=224, patch_size=16, in_chans=3, num_classes=args.nb_classes, embed_dim=192, depth=12, num_heads=3, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)).cuda()
else:
print("Creating student model: deiT_xtiny_patch4_32")
net = vision_transformer.TeacherVisionTransformer(img_size=32, patch_size=4, in_chans=3, num_classes=args.nb_classes, embed_dim=128, depth=12, num_heads=2, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6)).cuda()
net = torch.nn.DataParallel(net)
criterion = torch.nn.CrossEntropyLoss().cuda()
celoss = torch.nn.CrossEntropyLoss(reduction = 'none').cuda()
# optimizer = torch.optim.SGD(list(net.parameters()), lr=0.1, momentum=0.9, weight_decay=5e-4)
optimizer = torch.optim.AdamW(net.parameters(), lr=args.lr_S, weight_decay=0.025)
optimizer_noise = torch.optim.Adam([noise_adaptation], lr=0.001)
data_train_loader_noshuffle = DataLoader(data_train, batch_size=256, shuffle=False, num_workers=8)
def identify_attnlier(checkpoint, embed_dim, num_heads):
value_blk3 = []
value_blk7 = []
# pred_list = []
attn_inputs_blk3 = []
attn_inputs_blk7 = []
index = 0
embed_dim = int(embed_dim/num_heads)
scale = embed_dim ** -0.5
teacher.eval()
# Obtain weights and bias
# linear_weight_blk_3: (1152, 384). linear_bias_blk_3: (1152)
linear_weight_blk_3 = checkpoint['model']['blocks.3.attn.qkv.weight'].cuda()
linear_bias_blk_3 = checkpoint['model']['blocks.3.attn.qkv.bias'].cuda()
# linear_weight_q_blk_3, linear_weight_k_blk_3, linear_weight_v_blk_3 = torch.split(linear_weight_blk_3, [384, 384, 384], dim=0)
linear_weight_blk_7 = checkpoint["model"]['blocks.7.attn.qkv.weight'].cuda()
linear_bias_blk_7 = checkpoint["model"]['blocks.7.attn.qkv.bias'].cuda()
# linear_weight_q_blk_7, linear_weight_k_blk_7, linear_weight_v_blk_7 = torch.split(linear_weight_blk_7, [384, 384, 384], dim=0)
hooksadd = [
teacher.module.blocks[3].attn.register_forward_hook(
lambda self, input, output: attn_inputs_blk3.append(input)
),
teacher.module.blocks[7].attn.register_forward_hook(
lambda self, input, output: attn_inputs_blk7.append(input)
),
]
for i,(inputs, labels) in enumerate(data_train_loader_noshuffle):
inputs = inputs.cuda()
outputs, output_feature = teacher(inputs)
# calculate input × weights and view the shape
# B, N, C = 256, 65, 384
B, N, C = attn_inputs_blk3[index][0].shape
uniform = (torch.ones(B, N-1)/(N-1)).float().cuda()
qkv_blk_3 = torch.bmm(attn_inputs_blk3[0][0], linear_weight_blk_3.unsqueeze(0).repeat(B, 1, 1).permute(0, 2, 1)) + linear_bias_blk_3
qkv_blk_3 = qkv_blk_3.reshape(B, N, 3, num_heads, embed_dim).permute(2, 0, 3, 1, 4)
q_blk_3, k_blk_3, v_blk_3 = qkv_blk_3[0], qkv_blk_3[1], qkv_blk_3[2] # make torchscript happy (cannot use tensor as tuple)
# attn_blk_3: (B, num_heads, N, N) = (256, num_heads, 65, 65)
attn_blk_3 = (q_blk_3 @ k_blk_3.transpose(-2, -1)) * scale
attn_blk_3 = attn_blk_3.softmax(dim=-1)
# attnprime_blk_3: (B, N-1) = (256, 64)
attnprime_blk_3 = attn_blk_3[:,0,0,1:]
qkv_blk_7 = torch.bmm(attn_inputs_blk7[0][0], linear_weight_blk_7.unsqueeze(0).repeat(B, 1, 1).permute(0, 2, 1)) + linear_bias_blk_7
qkv_blk_7 = qkv_blk_7.reshape(B, N, 3, num_heads, embed_dim).permute(2, 0, 3, 1, 4)
q_blk_7, k_blk_7, v_blk_7 = qkv_blk_7[0], qkv_blk_7[1], qkv_blk_7[2] # make torchscript happy (cannot use tensor as tuple)
# attn_blk_7: (B, num_heads, N, N)
attn_blk_7 = (q_blk_7 @ k_blk_7.transpose(-2, -1)) * scale
attn_blk_7 = attn_blk_7.softmax(dim=-1)
# attnprime_blk_7: (B, N-1)
attnprime_blk_7 = attn_blk_7[:,0,0,1:]
loss_blk3 = csloss(attnprime_blk_3, uniform)
loss_blk7 = csloss(attnprime_blk_7, uniform)
value_blk3.append(loss_blk3.detach().clone())
value_blk7.append(loss_blk7.detach().clone())
attn_inputs_blk3.clear()
attn_inputs_blk7.clear()
print('Considering attnlier of batch %d from the wild massive unlabeled dataset.' % i)
for hook in hooksadd:
hook.remove()
return torch.cat(value_blk3,dim=0), torch.cat(value_blk7,dim=0)
def identify_outlier():
value = []
pred_list = []
index = 0
teacher.eval()
for i,(inputs, labels) in enumerate(data_train_loader_noshuffle):
inputs = inputs.cuda()
# outputs: (bs, n_classes)
outputs, output_feature = teacher(inputs)
# pred: (bs, 1)
pred = outputs.data.max(1)[1]
loss = celoss(outputs, pred)
value.append(loss.detach().clone())
index += inputs.shape[0]
pred_list.append(pred)
print('Considering outlier of batch %d from the wild massive unlabeled dataset.' % i)
return torch.cat(value,dim=0), torch.cat(pred_list,dim=0)
def train(epoch, trainloader, nll, class_weights):
net.train()
loss_list, batch_list = [], []
interval = len(trainloader) // 6
for i, (images, labels) in enumerate(trainloader):
images, labels = images.cuda(), labels.cuda()
optimizer.zero_grad()
optimizer_noise.zero_grad()
output, output_feature_s = net(images)
output_t, output_feature_t = teacher(images)
output_t = output_t.detach()
output_feature_t = output_feature_t.detach()
pred = output_t.data.max(1)[1]
preds_t = pred.cpu().data.numpy()
if args.robust:
for class_weight in class_weights:
weights = torch.from_numpy(class_weight[preds_t]).float().cuda()
loss = robust_kdloss(output, output_t, weights)
else:
loss = kdloss(output, output_t)
output_s = F.softmax(output, dim=1)
output_s_adaptation = torch.matmul(output_s, noisy(noise_adaptation))
loss += nll(torch.log(output_s_adaptation), pred)
if args.attnprobe_dist:
loss_patch_attn = args.patchattn * patch_attention_probe_loss(output_feature_t, output_feature_s)
loss += loss_patch_attn
loss_list.append(loss.data.item())
batch_list.append(i+1)
if (i % interval) == 0:
if args.attnprobe_dist:
print('Train - Epoch %d, Batch: %d, Loss: %f, Loss_attn: %f' % (epoch, i, loss.data.item(), loss_patch_attn.data.item()))
else:
print('Train - Epoch %d, Batch: %d, Loss: %f' % (epoch, i, loss.data.item()))
loss.backward()
optimizer.step()
optimizer_noise.step()
def test(epoch):
global acc, acc_best, epoch_best
net.eval()
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
total_correct = 0
avg_loss = 0.0
with torch.no_grad():
for i, (images, labels) in enumerate(data_test_loader):
images, labels = images.cuda(), labels.cuda()
output, output_feature_s = net(images)
avg_loss += criterion(output, labels).sum()
pred = output.data.max(1)[1]
total_correct += pred.eq(labels.data.view_as(pred)).sum()
acc1, acc5 = accuracy(output, labels, topk=(1, 5))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
avg_loss /= len(data_test)
acc = float(total_correct) / len(data_test)
if acc_best < acc:
torch.save(net.state_dict(), args.output_dir + 'student/' + 'checkpoint.pth')
acc_best = acc
epoch_best = epoch
print('Test Avg. Loss: %f, Accuracy: %f. Epoch: %d' % (avg_loss.data.item(), acc, epoch))
print('******** ******** ********')
print('Test Avg Best. Accuracy: %f. Epoch: %d' % (acc_best, epoch_best))
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
def train_and_test(epoch, trainloader3, nll, class_weight):
train(epoch, trainloader3, nll, class_weight)
test(epoch)
# def adjust_learning_rate(optimizer, epoch, max_epoch):
# """For resnet, the lr starts from 0.1, and is divided by 10 at 80 and 120 epochs"""
# if epoch < (max_epoch/200.0*80.0):
# lr = 0.1
# elif epoch < (max_epoch/200.0*160.0):
# lr = 0.01
# else:
# lr = 0.001
# for param_group in optimizer.param_groups:
# param_group['lr'] = lr
def adjust_learning_rate(optimizer, epoch, max_epoch, args):
"""Decay the learning rate based on schedule"""
lr = args.lr_S
if args.cos: # cosine lr schedule
lr *= 0.5 * (1. + math.cos(math.pi * epoch / max_epoch))
else: # stepwise lr schedule
for milestone in args.schedule:
lr *= 0.1 if epoch >= milestone else 1.
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def get_positive(value_blk3, value_blk7, value_out, args):
positive_index = []
if args.attnprobe_sel:
value = value_out
else:
value = args.attnlier * (value_blk3 + value_blk7) + args.outlier * value_out
if args.random:
print('randomly selected!')
positive_index = torch.tensor(random.sample(range(1281167), args.num_select))
else:
positive_index = value.topk(args.num_select,largest=False)[1]
return positive_index
def main():
global acc_best
if os.path.exists(args.selected_file + 'value_out.pth'):
value_out = torch.load(args.selected_file + 'value_out.pth').cuda()
pred_out = torch.load(args.selected_file + 'pred_out.pth').cuda()
value_blk3 = torch.load(args.selected_file + 'value_blk3.pth').cuda()
value_blk7 = torch.load(args.selected_file + 'value_blk7.pth').cuda()
# value_numpy = np.loadtxt(args.selected_file + '/value.txt')
# value = torch.Tensor(value_numpy)
# pred_numpy = np.loadtxt(args.selected_file + '/pred.txt')
# pred = torch.Tensor(pred_numpy)
else:
value_blk3, value_blk7 = identify_attnlier(checkpoint, embed_dim, num_heads)
value_out, pred_out = identify_outlier()
torch.save(value_out, args.selected_file + 'value_out.pth')
torch.save(pred_out, args.selected_file + 'pred_out.pth')
torch.save(value_blk3, args.selected_file + 'value_blk3.pth')
torch.save(value_blk7, args.selected_file + 'value_blk7.pth')
# np.savetxt(args.selected_file + '/value.txt', value.numpy(), fmt='%d',delimiter=None)
# np.savetxt(args.selected_file + '/pred.txt', pred.numpy(), fmt='%d',delimiter=None)
positive_index = get_positive(value_blk3, value_blk7, value_out, args)
nll = torch.nn.NLLLoss().cuda()
positive_index = positive_index.tolist()
data_train_select = torch.utils.data.Subset(data_train_transform, positive_index)
trainloader3 = torch.utils.data.DataLoader(label_train_subset + data_train_select, batch_size=256, shuffle=True, num_workers=8, pin_memory=True)
class_weight = get_class_weight(teacher, trainloader3, num_classes=args.nb_classes)
print(class_weight)
class_weights = perturb(class_weight)
epoch = int(640000/args.num_select * 512)
for e in range(1, epoch):
adjust_learning_rate(optimizer, e, epoch, args)
train_and_test(e, trainloader3, nll, class_weights)
print(acc_best)
if __name__ == '__main__':
main()
# --- StarcoderdataPython record 11262630 ---
import re
from ._abstract import AbstractScraper
from ._utils import get_minutes, get_yields, normalize_string
class SunBasket(AbstractScraper):
@classmethod
def host(self, domain="com"):
return f"sunbasket.{domain}"
def title(self):
return self.soup.find("h1").get_text()
def total_time(self):
minutes_tag = self.soup.find("span", text=re.compile(r"Minutes"))
return get_minutes(minutes_tag.parent.get_text())
def yields(self):
yields_tag = self.soup.find("span", text=re.compile(r"Servings,"))
return get_yields(yields_tag.parent.get_text())
def ingredients(self):
ingredients_container = self.soup.find(class_="ingredients-list")
ingredients = ingredients_container.findAll("li")
return [normalize_string(ingredient.get_text()) for ingredient in ingredients]
def instructions(self):
instructions_container = self.soup.find(
"div", {"class": "instructions-container"}
)
instructions = instructions_container.findAll("div", {"class": "step"})
instruction_list = []
for instruction in instructions:
step_number_tag = instruction.find(class_="step-number")
if step_number_tag is not None:
step_number = normalize_string(
instruction.find(class_="step-number").get_text()
)
step_name = normalize_string(
instruction.find(class_="step-header").get_text()
)
step_instructions = normalize_string(
instruction.find(class_="instruction-description").get_text()
)
instruction_list.append(
f"{step_number}: {step_name} - {step_instructions}"
)
return instruction_list
def image(self):
container = self.soup.find("div", {"class": "recipe-image-container"})
if not container:
return None
image = container.find("img", {"src": True})
return image["src"] if image else None
# --- StarcoderdataPython record 3494335 ---
<reponame>tperka-catalogicsoftware/pre-commit-maven
import argparse
import os.path
from pre_commit_maven.utils import generic_main
CWD = os.getcwd()
def main(cwd=CWD, print_fn=print, execute_fn=generic_main.execute) -> int:
return execute_fn(["clean", "test"], cwd)
if __name__ == "__main__":
exit(main())
# --- StarcoderdataPython record 1955796 ---
<filename>data_acquisition/figure_A7/2016_11_18_modulated_imaging_darkfield_nanodiamond_7_extra_green_filter/registration_brightness_correction_rep_avg.py
import numpy as np
from scipy.ndimage import gaussian_filter
from stack_registration import stack_registration, apply_registration_shifts
import np_tif
def main():
# each raw data stack has a full red and green power scan with red
# varying slowly and green varying more quickly and green/red pulse
# delay varying the quickest (5 delays, middle delay is 0 delay)
num_reps = 200 # number power scans taken
num_red_powers = 7
num_green_powers = 13
num_delays = 5
image_h = 128
image_w = 380
less_rows = 3 # top/bottom 3 rows may contain leakage from outside pixels
top = less_rows
bot = image_h - less_rows
# assume no sample motion during a single power scan
# allocate hyperstack to carry power/delay-averaged images for registration
data_rep = np.zeros((
num_reps,
image_h - less_rows * 2,
image_w,
), dtype=np.float64)
data_rep_bg = np.zeros((
num_reps,
image_h - less_rows * 2,
image_w,
), dtype=np.float64)
# allocate array to carry a number corresponding to the average red
# beam brightness for each red power
red_avg_brightness = np.zeros((num_red_powers))
# populate hyperstack from data
for rep_num in range(num_reps):
filename = 'STE_darkfield_power_delay_scan_' + str(rep_num) + '.tif'
print("Loading", filename)
imported_power_scan = np_tif.tif_to_array(
filename).astype(np.float64)[:, top:bot, :]
red_avg_brightness += get_bg_level(
imported_power_scan.reshape(
num_red_powers,
num_green_powers,
num_delays,
image_h - less_rows * 2,
image_w).mean(axis=1).mean(axis=1)
) / (2 * num_reps)
data_rep[rep_num, :, :] = imported_power_scan.mean(axis=0)
filename_bg = (
'STE_darkfield_power_delay_scan_' +
str(rep_num) + '_green_blocked.tif')
print("Loading", filename_bg)
imported_power_scan_bg = np_tif.tif_to_array(
filename_bg).astype(np.float64)[:, top:bot, :]
red_avg_brightness += get_bg_level(
imported_power_scan_bg.reshape(
num_red_powers,
num_green_powers,
num_delays,
image_h - less_rows * 2,
image_w).mean(axis=1).mean(axis=1)
) / (2 * num_reps)
data_rep_bg[rep_num, :, :] = imported_power_scan_bg.mean(axis=0)
# reshape red_avg_brightness to add a dimension for multiplication
# with a brightness array with dimensions num_red_powers X num_green
# powers X num_delays
red_avg_brightness = red_avg_brightness.reshape(num_red_powers, 1, 1)
# pick image/slice for all stacks to align to
representative_rep_num = 0
align_slice = data_rep[representative_rep_num, :, :]
# save pre-registered average data (all powers for each rep)
np_tif.array_to_tif(data_rep,
'dataset_not_registered_power_avg.tif')
np_tif.array_to_tif(data_rep_bg,
'dataset_green_blocked_not_registered_power_avg.tif')
# compute registration shifts
print("Computing registration shifts...")
shifts = stack_registration(
data_rep,
align_to_this_slice=align_slice,
refinement='integer',
register_in_place=True,
background_subtraction='edge_mean')
print("Computing registration shifts (no green) ...")
shifts_bg = stack_registration(
data_rep_bg,
align_to_this_slice=align_slice,
refinement='integer',
register_in_place=True,
background_subtraction='edge_mean')
# save registered average data (all powers for each rep) and shifts
np_tif.array_to_tif(data_rep,
'dataset_registered_power_avg.tif')
np_tif.array_to_tif(data_rep_bg,
'dataset_green_blocked_registered_power_avg.tif')
np_tif.array_to_tif(shifts, 'shifts.tif')
np_tif.array_to_tif(shifts_bg, 'shifts_bg.tif')
# now apply shifts to raw data and compute space-averaged signal
# and representative images
# define box around main lobe for computing space-averaged signal
rect_top = 44
rect_bot = 102
rect_left = 172
rect_right = 228
# initialize hyperstacks for signal (with/without green light)
print('Applying shifts to raw data...')
signal = np.zeros((
num_reps,
num_red_powers,
num_green_powers,
num_delays,
), dtype=np.float64)
signal_bg = np.zeros((
num_reps,
num_red_powers,
num_green_powers,
num_delays,
), dtype=np.float64)
data_hyper_shape = (
num_red_powers, num_green_powers, num_delays, image_h, image_w)
# get representative image cropping coordinates
rep_top = 22
rep_bot = 122
rep_left = 136
rep_right = 262
# initialize representative images (with/without green light)
darkfield_image = np.zeros((#num_reps,
rep_bot-rep_top,
rep_right-rep_left,
), dtype=np.float64)
STE_image = np.zeros((#num_reps,
rep_bot-rep_top,
rep_right-rep_left,
), dtype=np.float64)
darkfield_image_bg = np.zeros((#num_reps,
rep_bot-rep_top,
rep_right-rep_left,
), dtype=np.float64)
STE_image_bg = np.zeros((#num_reps,
rep_bot-rep_top,
rep_right-rep_left,
), dtype=np.float64)
# finally apply shifts and compute output data
for rep_num in range(num_reps):
filename = 'STE_darkfield_power_delay_scan_' + str(rep_num) + '.tif'
data = np_tif.tif_to_array(
filename).astype(np.float64)[:, top:bot, :]
filename_bg = ('STE_darkfield_power_delay_scan_' + str(rep_num) +
'_green_blocked.tif')
data_bg = np_tif.tif_to_array(filename_bg).astype(
np.float64)[:, top:bot, :]
print(filename)
print(filename_bg)
# apply registration shifts
apply_registration_shifts(
data,
registration_shifts=[shifts[rep_num]]*data.shape[0],
registration_type='nearest_integer',
edges='sloppy')
apply_registration_shifts(
data_bg,
registration_shifts=[shifts_bg[rep_num]]*data_bg.shape[0],
registration_type='nearest_integer',
edges='sloppy')
# re-scale images to compensate for red beam brightness fluctuations
# for regular data
local_laser_brightness = get_bg_level(
data.reshape(
num_red_powers,
num_green_powers,
num_delays,
data.shape[-2],
data.shape[-1]))
local_calibration_factor = red_avg_brightness / local_laser_brightness
local_calibration_factor = local_calibration_factor.reshape(
num_red_powers * num_green_powers * num_delays, 1, 1)
data = data * local_calibration_factor
# for green blocked data
local_laser_brightness_bg = get_bg_level(
data_bg.reshape(
num_red_powers,
num_green_powers,
num_delays,
data.shape[-2],
data.shape[-1]))
local_calibration_factor_bg = (
red_avg_brightness / local_laser_brightness_bg)
local_calibration_factor_bg = local_calibration_factor_bg.reshape(
num_red_powers * num_green_powers * num_delays, 1, 1)
data_bg = data_bg * local_calibration_factor_bg
# draw rectangle around bright lobe and spatially average signal
data_space_avg = data[:, rect_top:rect_bot,
rect_left:rect_right].mean(axis=2).mean(axis=1)
data_bg_space_avg = data_bg[:, rect_top:rect_bot,
rect_left:rect_right].mean(axis=2).mean(axis=1)
# reshape 1D signal and place in output file
signal[rep_num, :, :, :] = data_space_avg.reshape(
num_red_powers, num_green_powers, num_delays)
signal_bg[rep_num, :, :, :] = data_bg_space_avg.reshape(
num_red_powers, num_green_powers, num_delays)
# capture average images for max red/green power
image_green_power = num_green_powers - 1
image_red_power = num_red_powers - 1
STE_image += data[
-3, # Zero delay, max red power, max green power
rep_top:rep_bot,
rep_left:rep_right
]/num_reps
darkfield_image += data[
-1, # max red-green delay (2.5 us), max red power, max green power
rep_top:rep_bot,
rep_left:rep_right
]/num_reps/2 # one of two maximum absolute red/green delay values
darkfield_image += data[
-5, # min red-green delay (-2.5 us), max red power, max green power
rep_top:rep_bot,
rep_left:rep_right
]/num_reps/2 # one of two maximum absolute red/green delay values
STE_image_bg += data_bg[
-3, # Zero delay, max red power, max green power
rep_top:rep_bot,
rep_left:rep_right
]/num_reps
darkfield_image_bg += data_bg[
-1, # max red-green delay (2.5 us), max red power, max green power
rep_top:rep_bot,
rep_left:rep_right
]/num_reps/2 # one of two maximum absolute red/green delay values
darkfield_image_bg += data_bg[
-5, # min red-green delay (-2.5 us), max red power, max green power
rep_top:rep_bot,
rep_left:rep_right
]/num_reps/2 # one of two maximum absolute red/green delay values
print('Done applying shifts')
signal_tif_shape = (signal.shape[0] * signal.shape[1],
signal.shape[2],signal.shape[3])
print("Saving...")
np_tif.array_to_tif(signal.reshape(signal_tif_shape),
'signal_all_scaled.tif')
np_tif.array_to_tif(signal_bg.reshape(signal_tif_shape),
'signal_green_blocked_all_scaled.tif')
np_tif.array_to_tif(darkfield_image,
'darkfield_image_avg.tif')
np_tif.array_to_tif(darkfield_image_bg,
'darkfield_image_bg_avg.tif')
np_tif.array_to_tif(STE_image,
'STE_image_avg.tif')
np_tif.array_to_tif(STE_image_bg,
'STE_image_bg_avg.tif')
print("... done.")
return None
def get_bg_level(data):
num_regions = 2
# region 1
bg_up = 9
bg_down = 112
bg_left = 325#270#325
bg_right = 366
bg_level = data[..., bg_up:bg_down, bg_left:bg_right].mean(axis=(-2, -1))
# region 2
bg_up = 9
bg_down = 112
bg_left = 8
bg_right = 64#130#64
bg_level += data[..., bg_up:bg_down, bg_left:bg_right].mean(axis=(-2, -1))
return(bg_level / num_regions)
main()
# --- StarcoderdataPython record 3554981 ---
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod, util
from polygraphy.backend.base import BaseLoader
onnxruntime = mod.lazy_import("onnxruntime")
@mod.export(funcify=True)
class SessionFromOnnx(BaseLoader):
"""
Functor that builds an ONNX-Runtime inference session.
"""
def __init__(self, model_bytes):
"""
Builds an ONNX-Runtime inference session.
Args:
model_bytes (Union[Union[bytes, str], Callable() -> Union[bytes, str]]):
A serialized ONNX model or a path to a model or a callable that returns one of those.
"""
self._model_bytes_or_path = model_bytes
def call_impl(self):
"""
Returns:
onnxruntime.InferenceSession: The inference session.
"""
model_bytes, _ = util.invoke_if_callable(self._model_bytes_or_path)
return onnxruntime.InferenceSession(model_bytes)
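# Hedged usage sketch ("model.onnx" and the input name are illustrative): loaders are
# callables, so the session is built lazily when the functor is invoked:
#   build_session = SessionFromOnnx("model.onnx")
#   session = build_session()
#   outputs = session.run(None, {"input": input_array})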
# --- StarcoderdataPython record 4924125 ---
from django.conf.urls import url
from django.contrib.auth.views import login
from . import views
urlpatterns = [
url(r'^private/$', views.private, name='private'),
url(r'^register/$', views.CreateUser.as_view(), name='register'),
url(r'^login/$', login,
{'template_name': 'users/login.html'}, name='login'),
url(r'^logout/$', views.logout_user, name='logout'),
]
# --- StarcoderdataPython record 353910 ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# import python libs
import json
from pprint import pprint as pp
import sys
sys.path.append('../')
# import project libs
from constants import *
from helper import *
# defining globals & constants
PREANNOTATIONS_START_INDEX = 27
BLOCK_OFFSET = 58
# methods
def check_annotation_corrections(data_frame):
for subject_id, subject_data in enumerate(data_frame):
for block_index in range(0, 4):
(preannotations, corrections) = restructure_data(subject_data, block_index)
for index, number_of_annotations in enumerate(preannotations):
if number_of_annotations != sum(corrections[index]):
print('ERROR FOUND! subject', subject_id, ' block', block_index)
print('sum does not match for class', index)
print('expecting', number_of_annotations, 'annotations, got:', corrections[index])
print('full distribution and answer table for subject:')
pp(preannotations)
pp(corrections)
exit()
print('✓ annotation corrections of subject ID', subject_id, 'are valid')
def restructure_data(subject_data, block_index):
start_index = PREANNOTATIONS_START_INDEX + (BLOCK_OFFSET * block_index)
preannotations = [
subject_data[start_index],
subject_data[start_index + 7],
subject_data[start_index + 14],
subject_data[start_index + 21],
subject_data[start_index + 28],
subject_data[start_index + 35]
]
corrections = [
[],
[],
[],
[],
[],
[]
]
data_offset = (BLOCK_OFFSET * block_index)
for class_index in range(0, 6):
for answer_index in range(0, 6):
answer_count = subject_data[PREANNOTATIONS_START_INDEX + data_offset + 1 + class_index + answer_index]
corrections[class_index].append(answer_count)
data_offset += 6
return (preannotations, corrections)
def check_shape(data_frame):
header_length = len(data_frame[0])
for subject_id in range(0, len(data_frame) - 1):
subject_row = data_frame[subject_id + 1]
current_length = len(subject_row)
if current_length != header_length:
print('ERROR FOUND! row of subject ID', subject_id, 'is', current_length, 'but should be', header_length)
exit()
print('✓ length of subject ID', subject_id, 'is valid')
data = read_json_file('../' + JSON_DATA_FRAME_FILE_NAME)
check_shape(data)
check_annotation_corrections(data)
# --- StarcoderdataPython record 8139441 ---
#!/usr/bin/env python
import roslib
roslib.load_manifest('blob_tracker_base')
import rospy
from sensor_msgs.msg import RegionOfInterest
from sensor_msgs.msg import CameraInfo
from geometry_msgs.msg import Twist
class BlobFollower:
def __init__(self):
self.blob = None
self.info = None
rospy.init_node('blob_follow')
self.pub = rospy.Publisher("~twistOut",Twist,queue_size=1)
rospy.Subscriber("~blob",RegionOfInterest,self.store_blob)
rospy.Subscriber("~info",CameraInfo,self.store_info)
def store_blob(self,blob):
self.blob = blob
def store_info(self,info):
self.info = info
def run(self):
rospy.loginfo("Waiting for first blob and camera info")
t = Twist()
rate = rospy.Rate(10)
while (not rospy.is_shutdown()) and ((not self.info) or (not self.blob)):
self.pub.publish(t)
rate.sleep()
while not rospy.is_shutdown():
self.pub.publish(t)
rate.sleep()
if __name__=="__main__":
demo = BlobFollower()
demo.run()
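# Note: as written, run() only publishes an empty Twist. A following behaviour would
# replace the body of the second loop, e.g. steering on the blob centre versus the image
# centre from CameraInfo (the gain value below is hypothetical):
#   err_x = (self.blob.x_offset + self.blob.width / 2.0) - self.info.width / 2.0
#   t.angular.z = -0.002 * err_x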
# --- StarcoderdataPython record 12816887 ---
<reponame>tensorchen/DianpingSpider
import base64
import random
from settings import PROXIES
from settings import USER_AGENTS
class RandomUserAgent(object):
def process_request(self, request, spider):
request.headers.setdefault('User-Agent', random.choice(USER_AGENTS))
class ProxyMiddleware(object):
def process_request(self, request, spider):
request.meta['proxy'] = "http://%s" % random.choice(PROXIES)
        # proxy_user_pass = "<PASSWORD>"
        # encoded_user_pass = base64.b64encode(proxy_user_pass.encode()).decode()
        # request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
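# Hedged sketch of the settings this middleware expects (values illustrative):
#   USER_AGENTS = ["Mozilla/5.0 (Windows NT 10.0; Win64; x64) ...", ...]
#   PROXIES = ["127.0.0.1:8080", "10.0.0.2:3128"]  # host:port; "http://" is prefixed above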
# --- StarcoderdataPython record 6412516 ---
<reponame>jochym/ALM
import warnings
from collections import OrderedDict
import numpy as np
from . import _alm as alm
atom_names = ("X", "H", "He", "Li", "Be", "B", "C", "N", "O", "F",
"Ne", "Na", "Mg", "Al", "Si", "P", "S", "Cl", "Ar", "K",
"Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu",
"Zn", "Ga", "Ge", "As", "Se", "Br", "Kr", "Rb", "Sr", "Y",
"Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd", "Ag", "Cd", "In",
"Sn", "Sb", "Te", "I", "Xe", "Cs", "Ba", "La", "Ce", "Pr",
"Nd", "Pm", "Sm", "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm",
"Yb", "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt", "Au",
"Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr", "Ra", "Ac",
"Th", "Pa", "U", "Np", "Pu", "Am", "Cm", "Bk", "Cf", "Es",
"Fm", "Md", "No", "Lr", "Rf", "Db", "Sg", "Bh", "Hs", "Mt",
"Ds", "Rg", "Cn", "Uut", "Uuq", "Uup", "Uuh", "Uus", "Uuo")
# From src/optimize.h
# {sparsesolver: str} is omitted because this is set at ALM.optimize.
# This order is not allowed to change because it is explicitly used in _alm.c.
optimizer_control_data_types = OrderedDict([
('linear_model', int),
('use_sparse_solver', int),
('maxnum_iteration', int),
('tolerance_iteration', float),
('output_frequency', int),
('standardize', int),
('displacement_normalization_factor', float),
('debiase_after_l1opt', int),
('cross_validation', int),
('l1_alpha', float),
('l1_alpha_min', float),
('l1_alpha_max', float),
('num_l1_alpha', int),
('l1_ratio', float),
('save_solution_path', int)])
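# Hedged usage sketch (keys come from the table above, values are illustrative):
#   alm.optimizer_control = {'linear_model': 2, 'cross_validation': 5, 'l1_alpha': 0.01}
# Unknown keys raise a KeyError in the optimizer_control setter further below.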
class ALM(object):
"""Calculate harmonic and anharmonic interatomic force constants
Attributes
----------
lavec : ndarray
Basis vectors. a, b, c are given as row vectors.
shape=(3, 3), dtype='double'
xcoord : ndarray
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : ndarray
Atomic numbers.
shape=(num_atoms,), dtype='intc'
kind_names : OrderedDict
Pairs of (atomic number, element name). Since the atomic number is the
key of OrderedDict, only unique atomic numbers are stored and the
order of ``numbers`` is preserved in the keys of this OrderedDict.
displacements : ndarray
Displacements of atoms in supercells used as training data.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
forces : ndarray
Forces of atoms in supercells used as training data.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
verbosity : int
Level of the output frequency either 0 (no output) or
1 (normal output). Default is 0.
output_filename_prefix : str
More detailed logs are stored in files when this is given. This string
is used to the prefix of filenames of logs.
optimizer_control : dict
Parameters to use elastic net regression.
cv_l1_alpha : float (read-only)
Alpha value to minimize fitting error of elastic net regression
obtained by cross validation.
"""
def __init__(self, lavec, xcoord, numbers, verbosity=0):
"""
Parameters
----------
lavec : array_like
            Basis vectors. a, b, c are given as row vectors.
shape=(3, 3), dtype='double'
xcoord : array_like
Fractional coordinates of atomic points.
shape=(num_atoms, 3), dtype='double'
numbers : array_like
Atomic numbers.
shape=(num_atoms,), dtype='intc'
verbosity : int
Level of the output frequency either 0 (no output) or
1 (normal output). Default is 0.
"""
self._id = None
self._lavec = None
self._xcoord = None
self._numbers = None
self._verbosity = False
self._kind_names = None
self._iconst = 11
self._maxorder = 1
self.lavec = lavec
self.xcoord = xcoord
self.numbers = numbers
self._verbosity = verbosity
self._output_filename_prefix = None
# Whether python parameters are needed to be copied to C++ instance
# or not.
self._need_transfer = True
# self.define() has been done or not.
self._defined = False
@property
def lavec(self):
"""Getter of basis vectors
Returns
-------
lavec : ndarray
Copy of basis vectors. a, b, c are given as row vectors.
shape=(3, 3), dtype='double', order='C'
"""
return np.array(self._lavec, dtype='double', order='C')
@lavec.setter
def lavec(self, lavec):
"""Setter of basis vectors
Parameters
----------
lavec : array_like
Basis vectors. a, b, c are given as row vectors.
shape=(3, 3), dtype='double', order='C'
"""
self._need_transfer = True
self._lavec = np.array(lavec, dtype='double', order='C')
@property
def xcoord(self):
"""Getter of atomic point coordinates
Returns
-------
xcoord : ndarray
Atomic point coordinates.
shape=(num_atom, 3), dtype='double', order='C'
"""
return np.array(self._xcoord, dtype='double', order='C')
@xcoord.setter
def xcoord(self, xcoord):
"""Setter of atomic point coordinates
Returns
-------
xcoord : ndarray
Atomic point coordinates.
shape=(num_atom, 3), dtype='double', order='C'
"""
self._need_transfer = True
self._xcoord = np.array(xcoord, dtype='double', order='C')
@property
def numbers(self):
"""Getter of atomic numbers
Returns
-------
numbers : ndarray
Atomic numbers.
shape=(num_atom,), dtype='intc', order='C'
"""
return np.array(self._numbers, dtype='intc')
@numbers.setter
def numbers(self, numbers):
"""Setter of atomic numbers
Parameters
----------
numbers : ndarray
Atomic numbers.
shape=(num_atom,), dtype='intc', order='C'
"""
self._need_transfer = True
self._numbers = np.array(numbers, dtype='intc')
self._kind_names = OrderedDict.fromkeys(self._numbers)
for key in self._kind_names:
self._kind_names[key] = atom_names[key % 118]
@property
def kind_names(self):
return self._kind_names
@property
def verbosity(self):
return self._verbosity
@verbosity.setter
def verbosity(self, verbosity):
"""Set verbosity of output.
Parameters
----------
verbosity : int
            Choose the level of the output frequency: either 0 (no output)
            or 1 (normal output).
"""
self._verbosity = verbosity
self._set_verbosity()
def set_verbosity(self, verbosity):
self.verbosity = verbosity
def __enter__(self):
self.alm_new()
return self
def __exit__(self, exc_type, exc_value, traceback):
self.alm_delete()
def alm_new(self):
"""Create ALM instance in C++.
This is also called by context manager when entering the block.
ex.::
with ALM(lavec, xcoord, numbers) as alm:
Note
----
When an ALM instance is created by ``alm_new``, it must be deleted
by ``alm_delete`` to avoid memory leak.
"""
if self._id is None:
self._id = alm.alm_new()
if self._id < 0:
raise RuntimeError("Too many ALM objects")
if self._verbosity is not None:
self.verbosity = self._verbosity
else:
raise("This ALM object is already initialized.")
def alm_delete(self):
"""Delete ALM instance in C++.
This is also called by context manager when exiting the block.
ex.::
with ALM(lavec, xcoord, numbers) as alm:
"""
if self._id is None:
self._show_error_not_initizalied()
alm.alm_delete(self._id)
self._id = None
def suggest(self):
"""Compute displacement patterns to obtain force constants."""
if self._id is None:
self._show_error_not_initizalied()
if self._defined:
alm.suggest(self._id)
else:
self._show_error_not_defined()
def optimize(self, solver='dense'):
"""Fit force constants to forces.
Parameters
----------
solver : str, default='dense'
            Solver choice for fitting, either 'dense' or 'SimplicialLDLT'.
- When solver='dense', the fitting is performed with the
singular value decomposition implemented in LAPACK.
- When solver='SimplicialLDLT', the fitting is performed with
the sparse solver class SimplicialLDLT implemented in
Eigen3 library.
Returns
-------
info : int
            This indicates how the fitting went:
            0 if the fitting was successful, 1 otherwise.
"""
if self._id is None:
self._show_error_not_initizalied()
if not self._defined:
self._show_error_not_defined()
solvers = {'dense': 'dense', 'simplicialldlt': 'SimplicialLDLT'}
if solver.lower() not in solvers:
msgs = ["The given solver option is not supported.",
"Available options are 'dense' and 'SimplicialLDLT'."]
raise ValueError("\n".join(msgs))
info = alm.optimize(self._id, solvers[solver.lower()])
return info
@property
def output_filename_prefix(self):
return self._output_filename_prefix
@output_filename_prefix.setter
def output_filename_prefix(self, prefix):
"""Set output prefix of output filename"""
if self._id is None:
self._show_error_not_initizalied()
if type(prefix) is str:
self._output_filename_prefix = prefix
alm.set_output_filename_prefix(self._id, prefix)
def set_output_filename_prefix(self, prefix):
self.output_filename_prefix = prefix
@property
def optimizer_control(self):
if self._id is None:
self._show_error_not_initizalied()
optctrl = alm.get_optimizer_control(self._id)
keys = optimizer_control_data_types.keys()
optcontrol = dict(zip(keys, optctrl))
return optcontrol
@optimizer_control.setter
def optimizer_control(self, optcontrol):
if self._id is None:
self._show_error_not_initizalied()
keys = optimizer_control_data_types.keys()
optctrl = []
optcontrol_l = {key.lower(): optcontrol[key] for key in optcontrol}
        for key in optcontrol:
            if key.lower() not in keys:
                msg = "%s is not a valid key for optimizer control." % key
                raise KeyError(msg)
        for key in keys:
            if key in optcontrol_l:
                optctrl.append(optcontrol_l[key])
            else:
                optctrl.append(None)
alm.set_optimizer_control(self._id, optctrl)
def set_optimizer_control(self, optcontrol):
self.optimizer_control = optcontrol
@property
def displacements(self):
"""Get displacements
Returns
--------
u : ndarray
Atomic displacement patterns in supercells in Cartesian.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
"""
if self._id is None:
self._show_error_not_initizalied()
ndata = alm.get_number_of_data(self._id)
u = np.zeros((ndata, len(self._xcoord), 3), dtype='double', order='C')
succeeded = alm.get_u_train(self._id, u)
if succeeded:
return u
else:
return None
@displacements.setter
def displacements(self, u):
"""Set displacements
Parameters
----------
u : array_like
Atomic displacement patterns in supercells in Cartesian.
shape=(supercells, num_atoms, 3), dtype='double'
"""
if self._id is None:
self._show_error_not_initizalied()
if u.ndim != 3:
msg = "Displacement array has to be three dimensions."
raise RuntimeError(msg)
alm.set_u_train(self._id, np.array(u, dtype='double', order='C'))
@property
def forces(self):
"""Get forces
Returns
--------
f : ndarray
Forces in supercells.
shape=(supercells, num_atoms, 3), dtype='double', order='C'
"""
if self._id is None:
self._show_error_not_initizalied()
ndata = alm.get_number_of_data(self._id)
f = np.zeros((ndata, len(self._xcoord), 3), dtype='double', order='C')
succeeded = alm.get_f_train(self._id, f)
if succeeded:
return f
else:
return None
@forces.setter
def forces(self, f):
"""Set forces
Parameters
----------
f : array_like
Forces in supercells.
shape=(supercells, num_atoms, 3), dtype='double'
"""
if self._id is None:
self._show_error_not_initizalied()
if f.ndim != 3:
msg = "Force array has to be three dimensions."
raise RuntimeError(msg)
alm.set_f_train(self._id, np.array(f, dtype='double', order='C'))
def set_training_data(self, u, f):
"""Set displacements and respective forces in supercell.
Parameters
----------
u : array_like
Atomic displacement patterns in supercells in Cartesian.
dtype='double'
shape=(supercells, num_atoms, 3)
f : array_like
Forces in supercells.
dtype='double'
shape=(supercells, num_atoms, 3)
"""
self.displacements = u
self.forces = f
def set_displacement_and_force(self, u, f):
warnings.warn("set_displacement_and_force is deprecated. "
"Use set_training_data.", DeprecationWarning)
self.set_training_data(u, f)
def define(self, maxorder, cutoff_radii=None, nbody=None,
symmetrization_basis='Lattice'):
"""Define the Taylor expansion potential.
Parameters
----------
maxorder : int
Maximum order of the Taylor expansion potential.
- If ``maxorder = 1``, only harmonic (2nd-order) terms are
considered.
- If ``maxorder = 2``, both harmonic and cubic terms are
considered.
cutoff_radii : array_like, default = None
Cutoff radii defined for each order.
When a negative value is provided, the cutoff radius is not used.
dtype='double'
shape=(maxorder, num_elems, num_elems)
nbody : array_like, default = None
Option to neglect multi-body interactions.
dtype='intc'
shape=(maxorder,)
symmetrization_basis : str, default='Lattice'
Either 'Cartesian' or 'Lattice'. Symmetrization of force constants
is done either in the matrix based on crystal coordinates
('Lattice') or Cartesian coordinates ('Cartesian').
"""
if self._id is None:
self._show_error_not_initizalied()
self._transfer_parameters()
if nbody is None:
nbody = []
for i in range(maxorder):
nbody.append(i + 2)
else:
if len(nbody) != maxorder:
msg = "The size of nbody must be equal to maxorder."
raise RuntimeError(msg)
if cutoff_radii is None:
_cutoff_radii = None
else:
_cutoff_radii = np.array(cutoff_radii, dtype='double', order='C')
nelem = len(_cutoff_radii.ravel())
if (nelem // maxorder) * maxorder != nelem:
msg = "The array shape of cutoff_radii is wrong."
raise RuntimeError(msg)
nkd = int(round(np.sqrt(nelem // maxorder)))
if nkd ** 2 - nelem // maxorder != 0:
msg = "The array shape of cutoff_radii is wrong."
raise RuntimeError(msg)
_cutoff_radii = np.reshape(_cutoff_radii, (maxorder, nkd, nkd),
order='C')
self._maxorder = maxorder
if symmetrization_basis.lower() in ['lattice', 'cartesian']:
fc_basis = symmetrization_basis.capitalize()
else:
fc_basis = 'Lattice'
alm.define(self._id,
maxorder,
np.array(nbody, dtype='intc'),
_cutoff_radii,
fc_basis)
alm.init_fc_table(self._id)
self._defined = True
def set_constraint(self, translation=True, rotation=False):
"""Set constraints for the translational and rotational invariances
Parameters
----------
translation : bool, optional (default = True)
When set to ``True``, the translational invariance
(aka acoustic sum rule) is imposed between force constants.
rotation : bool, optional (default = False)
When set to ``True``, the rotational invariance is imposed between
force constants. This function is not implemented.
"""
if rotation is True:
raise("Rotational invariance is not supported in python API.")
if translation is True:
iconst = 11
else:
iconst = 10
self._iconst = iconst
alm.set_constraint_type(self._id, self._iconst)
def getmap_primitive_to_supercell(self):
"""Returns the mapping information from the primitive cell to the supercell.
Returns
-------
map_p2s : array_like
The mapping information of atoms from the primitive cell to the
supercell.
dtype='intc'
shape = (num_trans, num_atoms_primitive)
"""
if self._id is None:
self._show_error_not_initizalied()
if not self._defined:
self._show_error_not_defined()
map_p2s = np.zeros(len(self._xcoord), dtype='intc')
ntrans = alm.get_atom_mapping_by_pure_translations(self._id, map_p2s)
return map_p2s.reshape((ntrans, -1))
def get_displacement_patterns(self, fc_order):
"""Returns the displacement patterns to obtain force constants.
Parameters
----------
fc_order : int
The order of force constants to get the displacement patterns.
- If ``fc_order = 1``, returns patterns for harmonic force
constants.
- If ``fc_order = 2``, returns patterns for cubic force constants.
- If ``fc_order = 3``, returns patterns for quartic force
constants.
- ...
Returns
-------
all_disps : array_like, shape = (n_patterns,)
The array of tuples (``atom_index``, ``direction``, ``basis``),
where ``direction`` is the numpy.ndarray of size = (3,)
representing the direction of the displacement,
and ``basis`` is a string either "Cartesian" or "Fractional".
"""
if self._id is None:
self._show_error_not_initizalied()
if fc_order > self._maxorder:
msg = ("The fc_order must not be larger than the maximum order "
"(maxorder).")
raise ValueError(msg)
numbers = self._get_number_of_displaced_atoms(fc_order)
tot_num = np.sum(numbers)
atom_indices = np.zeros(tot_num, dtype='intc')
disp_patterns = np.zeros((tot_num, 3), dtype='double', order='C')
nbasis = alm.get_displacement_patterns(self._id,
atom_indices,
disp_patterns,
fc_order)
basis = ["Cartesian", "Fractional"][nbasis]
all_disps = []
pos = 0
for num in numbers:
disp = []
for i in range(num):
disp.append((atom_indices[pos], disp_patterns[pos], basis))
pos += 1
all_disps.append(disp)
return all_disps
def get_fc(self, fc_order, mode="origin", permutation=True):
"""Returns the force constant values
Parameters
----------
fc_order : int
The order of force constants to get.
- If ``fc_order = 1``, returns harmonic force constants.
- If ``fc_order = 2``, returns cubic force constants.
- If ``fc_order = 3``, returns quartic force constants.
- ...
mode : str, optional (default="origin")
The choice of the force constant list to be returned.
- If "origin", returns the reducible set of force constants,
whose first element corresponds to an atom in the
primitive cell at the origin.
- If "all", returns the all non-zero elements of force constants
in the supercell.
- If "irreducible" or "irred", returns the irreducible set of
force constants.
permutation : bool (default=True)
The flag for printing out elements with permutation symmetry.
Effective only when ``mode = origin`` or ``mode = all``.
- If True, returns force constants after replicating elements
by the permutation of indices.
- If False, returns force constants without replicating elements
by the permutation of indices. For "origin" and "all", all
              indices except the first index participate in the permutation
of indices to reduce the number of the output values.
Returns
-------
fc_values : array_like, dtype='double', shape=(num_fc,)
Force constant values.
elem_indices : array_like, dtype='int', shape=(num_fc, fc_order + 1)
Array of flattened indices 3 * index_atom + index_xyz.
Note
----
This method returns force constants in Cartesian basis
        when ``mode = origin`` or ``mode = all``.
When ``mode = irred``, it returns the irreducible set of
force constants in the basis defined via "symmetrization_basis"
of the alm.define method.
"""
if self._id is None:
self._show_error_not_initizalied()
if fc_order > self._maxorder:
msg = ("The fc_order must not be larger than the maximum order "
"(maxorder).")
raise ValueError(msg)
perm_int = permutation * 1
if mode == "origin":
fc_length = self._get_number_of_fc_origin(fc_order, perm_int)
fc_values = np.zeros(fc_length, dtype='double')
elem_indices = np.zeros((fc_length, fc_order + 1),
dtype='intc', order='C')
alm.get_fc_origin(self._id, fc_values, elem_indices, perm_int)
return fc_values, elem_indices
elif mode == "irreducible" or mode == "irred":
fc_length = self._get_number_of_irred_fc_elements(fc_order)
fc_values = np.zeros(fc_length, dtype='double')
elem_indices = np.zeros((fc_length, fc_order + 1),
dtype='intc', order='C')
alm.get_fc_irreducible(self._id, fc_values, elem_indices)
return fc_values, elem_indices
elif mode == "all":
map_p2s = np.zeros(len(self._xcoord), dtype='intc')
ntrans = alm.get_atom_mapping_by_pure_translations(self._id,
map_p2s)
fc_length = self._get_number_of_fc_origin(
fc_order, perm_int) * ntrans
fc_values = np.zeros(fc_length, dtype='double')
elem_indices = np.zeros((fc_length, fc_order + 1),
dtype='intc', order='C')
alm.get_fc_all(self._id, fc_values, elem_indices, perm_int)
return fc_values, elem_indices
else:
raise ValueError("Invalid mode in get_fc.")
def set_fc(self, fc_in):
"""Copy force constant obtained by an external optimizer to the ALM instance.
Parameters
----------
fc_in : array_like
The irreducible set of force constants.
dtype='double'
shape=(num_fc,)
Note
----
When an external optimizer, such as numpy.linalg.lstsq, is used to fit
force constants, the force constants need to be passed to
the ALM instance by ``set_fc`` to use the ``get_fc`` method.
"""
if self._id is None:
self._show_error_not_initizalied()
maxorder = self._maxorder
fc_length_irred = 0
for i in range(maxorder):
fc_length_irred += self._get_number_of_irred_fc_elements(i + 1)
if fc_length_irred != len(fc_in):
msg = "The size of the given force constant array is incorrect."
raise RuntimeError(msg)
alm.set_fc(self._id, np.array(fc_in, dtype='double', order='C'))
def get_matrix_elements(self):
"""Returns the sensing matrix A and force vector b
Returns
-------
amat : ndarray, dtype='double'
shape=(3 * num_atoms * ndata_training, num_fc_irred), order='F'.
The sensing matrix A calculated from the displacements.
bvec : ndarray, dtype='double'
shape=(3 * num_atoms * ndata_training,)
The vector b calculated from the atomic forces.
Note
----
From the amat (``A``) and bvec (``b``), the force constant vector ``x``
        can be obtained by solving the least-squares problem:
        x = argmin_x |Ax - b|^2.
"""
if self._id is None:
self._show_error_not_initizalied()
maxorder = self._maxorder
nrows = self._get_nrows_amat()
fc_length = 0
for i in range(maxorder):
fc_length += self._get_number_of_irred_fc_elements(i + 1)
amat = np.zeros(nrows * fc_length, dtype='double', order='C')
bvec = np.zeros(nrows, dtype='double')
alm.get_matrix_elements(self._id, amat, bvec)
return (np.reshape(amat, (nrows, fc_length), order='F'), bvec)
@property
def cv_l1_alpha(self):
"""Returns L1 alpha at minimum CV"""
if self._id is None:
self._show_error_not_initizalied()
return alm.get_cv_l1_alpha(self._id)
def get_cv_l1_alpha(self):
return self.cv_l1_alpha
def _transfer_parameters(self):
if self._need_transfer:
self._set_cell()
self._need_transfer = False
self._defined = False
def _set_cell(self):
"""Inject crystal structure in C++ instance"""
if self._id is None:
self._show_error_not_initizalied()
if self._lavec is None:
msg = "Basis vectors are not set."
raise RuntimeError(msg)
if self._xcoord is None:
msg = "Atomic point coordinates (positions) are not set."
raise RuntimeError(msg)
if self._numbers is None:
msg = "Atomic numbers are not set."
raise RuntimeError(msg)
if len(self._xcoord) != len(self._numbers):
msg = "Numbers of atomic points and atomic numbers don't agree."
raise RuntimeError(msg)
kind_numbers = np.array(list(self._kind_names.keys()), dtype='intc')
alm.set_cell(self._id, self._lavec, self._xcoord, self._numbers,
kind_numbers)
def _set_verbosity(self):
"""Inject verbosity in C++ instance."""
if self._id is None:
self._show_error_not_initizalied()
alm.set_verbosity(self._id, self._verbosity)
def _get_nrows_amat(self):
"""Private method to return the number of training data sets"""
if self._id is None:
self._show_error_not_initizalied()
nrows_amat = alm.get_nrows_amat(self._id)
return nrows_amat
def _get_id(self):
"""Private method to return the instance ID"""
return self._id
def _get_number_of_displacement_patterns(self, fc_order):
"""Private method to return the number of displacement patterns
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
return alm.get_number_of_displacement_patterns(self._id, fc_order)
def _get_number_of_displaced_atoms(self, fc_order):
"""Private method to return the number of displaced atoms
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
num_disp_patterns = self._get_number_of_displacement_patterns(
fc_order)
numbers = np.zeros(num_disp_patterns, dtype='intc')
alm.get_number_of_displaced_atoms(self._id, numbers, fc_order)
return numbers
def _get_number_of_fc_elements(self, fc_order):
"""Private method to get the number of force constants
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
return alm.get_number_of_fc_elements(self._id, fc_order)
def _get_number_of_fc_origin(self, fc_order, permutation):
"""Private method to get the number of force constants for fc_origin
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
permutation: int
Flag to include permutated elements
permutation = 0 for skipping permutated elements,
permutation = 1 for including them
"""
return alm.get_number_of_fc_origin(self._id, fc_order, permutation)
def _get_number_of_irred_fc_elements(self, fc_order):
"""Private method to get the number of irreducible set of force constants
Parameters
----------
fc_order : int
The order of force constants.
fc_order = 1 for harmonic, fc_order = 2 for cubic ...
"""
return alm.get_number_of_irred_fc_elements(self._id, fc_order)
def _show_error_not_initizalied(self):
"""Private method to raise an error"""
msg = ("This ALM instance has to be initialized by ALM.alm_new() or "
"context manager.")
raise RuntimeError(msg)
def _show_error_not_defined(self):
msg = "This ALM.define() has to be done beforehand."
raise RuntimeError(msg)
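# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module). It assumes the
# compiled ``alm`` C++ extension is importable and uses a toy cubic cell with
# placeholder (all-zero) displacement/force data, so the fitted force
# constants are meaningless; real training data would come from supercell
# force calculations.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np

    lavec = np.eye(3) * 5.0               # toy 5 Angstrom cubic cell
    xcoord = np.array([[0.0, 0.0, 0.0],
                       [0.5, 0.5, 0.5]])  # fractional coordinates of two atoms
    numbers = [14, 14]                    # two Si atoms

    with ALM(lavec, xcoord, numbers, verbosity=1) as alm_obj:
        alm_obj.define(1)                 # harmonic (2nd-order) terms only
        alm_obj.suggest()
        patterns = alm_obj.get_displacement_patterns(1)
        print("Number of displacement patterns: {}".format(len(patterns)))

        # Placeholder training data; replace with displacements and forces
        # from real supercell calculations.
        u = np.zeros((2, len(xcoord), 3))
        f = np.zeros((2, len(xcoord), 3))
        alm_obj.set_training_data(u, f)

        # Either fit inside ALM ...
        alm_obj.optimize(solver='dense')
        fc_values, elem_indices = alm_obj.get_fc(1, mode='origin')

        # ... or fit externally and push the result back with set_fc().
        amat, bvec = alm_obj.get_matrix_elements()
        fc_irred = np.linalg.lstsq(amat, bvec, rcond=None)[0]
        alm_obj.set_fc(fc_irred)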
# Code for CVPR'21 paper:
# [Title] - "CoLA: Weakly-Supervised Temporal Action Localization with Snippet Contrastive Learning"
# [Author] - <NAME>*, <NAME>, <NAME>, <NAME> and <NAME>
# [Github] - https://github.com/zhang-can/CoLA
import numpy as np
import os
from easydict import EasyDict as edict
cfg = edict()
cfg.GPU_ID = '0'
cfg.LR = '[0.0001]*6000'
cfg.NUM_ITERS = len(eval(cfg.LR))
cfg.NUM_CLASSES = 20
cfg.MODAL = 'all'
cfg.FEATS_DIM = 2048
cfg.BATCH_SIZE = 16
cfg.DATA_PATH = './data/THUMOS14'
cfg.NUM_WORKERS = 8
cfg.LAMBDA = 0.01
cfg.R_EASY = 5
cfg.R_HARD = 20
cfg.m = 3
cfg.M = 6
cfg.TEST_FREQ = 100
cfg.PRINT_FREQ = 20
cfg.CLASS_THRESH = 0.2
cfg.NMS_THRESH = 0.6
cfg.CAS_THRESH = np.arange(0.0, 0.25, 0.025)
cfg.ANESS_THRESH = np.arange(0.1, 0.925, 0.025)
cfg.TIOU_THRESH = np.linspace(0.1, 0.7, 7)
cfg.UP_SCALE = 24
cfg.GT_PATH = os.path.join(cfg.DATA_PATH, 'gt.json')
cfg.SEED = 0
cfg.FEATS_FPS = 25
cfg.NUM_SEGMENTS = 750
cfg.CLASS_DICT = {'BaseballPitch': 0, 'BasketballDunk': 1, 'Billiards': 2,
'CleanAndJerk': 3, 'CliffDiving': 4, 'CricketBowling': 5,
'CricketShot': 6, 'Diving': 7, 'FrisbeeCatch': 8,
'GolfSwing': 9, 'HammerThrow': 10, 'HighJump': 11,
'JavelinThrow': 12, 'LongJump': 13, 'PoleVault': 14,
'Shotput': 15, 'SoccerPenalty': 16, 'TennisSwing': 17,
'ThrowDiscus': 18, 'VolleyballSpiking': 19}
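# ---------------------------------------------------------------------------
# Hypothetical consumption sketch (not part of the original CoLA config):
# shows how the string-encoded schedule in cfg.LR is typically expanded and
# stepped through by a training script.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    lr_schedule = eval(cfg.LR)            # '[0.0001]*6000' -> list of 6000 floats
    assert len(lr_schedule) == cfg.NUM_ITERS
    for step, lr in enumerate(lr_schedule, start=1):
        if step % cfg.PRINT_FREQ == 0:
            print('iter {:d}/{:d}  lr={:g}'.format(step, cfg.NUM_ITERS, lr))
        if step % cfg.TEST_FREQ == 0:
            pass  # evaluation on THUMOS14 would be triggered here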
__author__ = 'pvde'
"""
Limitations:
Keys, Certificates and Signing:
- Certificate Policies are not checked
- Certificates are not validated
- Signatures on input CPPs are not validated
- The generated CPA is not signed
- No support for PGP (which is theoretically needed for AS1)
- Certificate chain validation against specified trust anchors only triggers a match on
root CA certificate. Any intermediate certificates are not matched against the trust
anchor set.
Transports:
- IMAP ?
Limitations / features:
- SecurityPolicy only matches "href", does not intersect XML children yet.
"""
import lxml.etree, logging, datetime, isodate, hashlib, base64, uuid, re, string
from copy import deepcopy
import schema
logging.basicConfig(level=logging.DEBUG)
_NSMAP = {'cppa': 'http://docs.oasis-open.org/ebcore/ns/cppa/v3.0',
'ds': 'http://www.w3.org/2000/09/xmldsig#',
'xml': 'http://www.w3.org/XML/1998/namespace',
'xkms': 'http://www.w3.org/2002/03/xkms#',
'dsig11' : 'http://www.w3.org/2009/xmldsig11#'
}
unreferenced_cert_transform = lxml.etree.XSLT(
lxml.etree.XML("""<?xml version="1.0" encoding="UTF-8"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:cppa="http://docs.oasis-open.org/ebcore/ns/cppa/v3.0"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
version="1.0">
<xsl:template match="cppa:PartyInfo/cppa:Certificate | cppa:CounterPartyInfo/cppa:Certificate ">
<xsl:variable name="id" select="@id"></xsl:variable>
<xsl:choose>
<xsl:when test="//node()[@certId=$id]">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:when>
<xsl:otherwise>
<xsl:comment>Suppressed unreferenced certificate <xsl:value-of select="$id"/> </xsl:comment>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="/ | @* | node()">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
"""))
unreferenced_trustanchor_transform = lxml.etree.XSLT(
lxml.etree.XML("""<?xml version="1.0" encoding="UTF-8"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:cppa="http://docs.oasis-open.org/ebcore/ns/cppa/v3.0"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
version="1.0">
<xsl:template match="cppa:TrustAnchorSet">
<xsl:variable name="id" select="@id"></xsl:variable>
<xsl:choose>
<xsl:when test="//node()[@certId=$id]">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:when>
<xsl:otherwise>
<xsl:comment>Suppressed unreferenced trust anchor set <xsl:value-of select="$id"/> </xsl:comment>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="/ | @* | node()">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
"""))
unreferenced_ssh_key_transform = lxml.etree.XSLT(
lxml.etree.XML("""<?xml version="1.0" encoding="UTF-8"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:cppa="http://docs.oasis-open.org/ebcore/ns/cppa/v3.0"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
version="1.0">
<xsl:template match="cppa:PartyInfo/cppa:SSHKey | cppa:CounterPartyInfo/cppa:SSHKey ">
<xsl:variable name="id" select="@id"></xsl:variable>
<xsl:choose>
<xsl:when test="//node()[@keyId=$id]">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:when>
<xsl:otherwise>
<xsl:comment>Suppressed unreferenced SSH Key <xsl:value-of select="$id"/> </xsl:comment>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="/ | @* | node()">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
"""))
unreferenced_policy_set_transform = lxml.etree.XSLT(
lxml.etree.XML("""<?xml version="1.0" encoding="UTF-8"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:cppa="http://docs.oasis-open.org/ebcore/ns/cppa/v3.0"
xmlns:xml="http://www.w3.org/XML/1998/namespace"
xmlns:ds="http://www.w3.org/2000/09/xmldsig#"
version="1.0">
<xsl:template match="cppa:PartyInfo/cppa:CertificatePolicySet | cppa:CounterPartyInfo/cppa:CertificatePolicySet ">
<xsl:variable name="id" select="@id"></xsl:variable>
<xsl:choose>
<xsl:when test="//node()[@setId=$id]">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:when>
<xsl:otherwise>
<xsl:comment>Suppressed unreferenced policy set <xsl:value-of select="$id"/> </xsl:comment>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="/ | @* | node()">
<xsl:copy>
<xsl:apply-templates select="@* | node()" />
</xsl:copy>
</xsl:template>
</xsl:stylesheet>
"""))
def unify(acpp, bcpp, nsmap = {}, agreementidfun = None, agreementid=None,
requested_activation_date=None, requested_expiration_date=None,
acpp_url=None, bcpp_url=None, default_handler=None, handle_defaults=False,
delegation_handler=None,
remove_unused_certs=False):
"""
@param acpp: a CPP
@param bcpp: another CPP
@param nsmap: optional dictionary of additional namespaces
@param agreementidfun: optional function to determine the agreement identifier
@param agreementid: optional specific agreement identifier to use
    @return: the unified CPA as an lxml Element
"""
unifier = CPABuilder(nsmap, agreementidfun, default_handler=default_handler,
delegation_handler=delegation_handler)
return unifier.unify(acpp, bcpp, agreementid=agreementid,
requested_activation_date=requested_activation_date,
requested_expiration_date=requested_expiration_date,
acpp_url=acpp_url,
bcpp_url=bcpp_url,
handle_defaults=handle_defaults,
remove_unused_certs=remove_unused_certs)
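# Hypothetical usage sketch (not part of the original module): parse two CPP
# documents with lxml and derive a CPA from them. The file names are
# placeholders; real CPPs must conform to the OASIS CPPA3 schema.
def _example_unify(acpp_path='party_a_cpp.xml', bcpp_path='party_b_cpp.xml'):
    acpp = lxml.etree.parse(acpp_path).getroot()
    bcpp = lxml.etree.parse(bcpp_path).getroot()
    cpa = unify(acpp, bcpp, remove_unused_certs=True)
    return lxml.etree.tostring(cpa, pretty_print=True)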
class UnificationException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class CPABuilder():
def __init__(self, nsmap = {}, agreementidfun = None,
default_handler = None, delegation_handler = None):
"""
@param nsmap: optional dictionary of additional namespaces
@param agreementidfun: optional function to determine the agreement identifier
@return:
"""
self.reset_caches()
self.NSMAP = {}
self.agreementidfun = self._agreementidfun
if agreementidfun != None:
self.agreementidfun = agreementidfun
if default_handler != None:
self.default_handler = default_handler
else:
self.default_handler = _identity_transform
self.channel_handlers = {
cppa('NamedChannel'): self.unify_named_channel,
cppa('AS1Channel'): self.unify_ediint_channel,
cppa('AS2Channel'): self.unify_ediint_channel,
cppa('AS3Channel'): self.unify_ediint_channel,
cppa('ebMS2Channel'): self.unify_ebms2_channel,
cppa('WSChannel'): self.unify_ws_channel,
cppa('ebMS3Channel'): self.unify_ebms3_channel,
cppa('TransportChannel') : self.unify_transport_channel,
cppa('AMQPChannel') : self.unify_amqp_channel
}
self.packaging_handlers = {
cppa('SOAPWithAttachmentsEnvelope'):
self.unify_soap_with_attachments_envelope,
cppa('SimpleSOAPEnvelope'):
self.unify_simple_soap_envelope,
cppa('MIMEEnvelope'):
self.unify_mime_envelope
}
self.mimepart_handlers = {
cppa('SimpleMIMEPart'):
self.unify_simple_mime_part,
cppa('MIMEMultipartRelated'):
self.unify_mime_multipart_related,
cppa('ExternalPayload'):
self.unify_external_payload
}
# load the defaults
for prefix in _NSMAP:
self.NSMAP[prefix] = _NSMAP[prefix]
# load any extension namespaces
for prefix in nsmap:
self.NSMAP[prefix] = nsmap[prefix]
# Delegation context handler.
if delegation_handler != None:
self.is_connected_to = delegation_handler
else:
self.is_connected_to = self.default_is_connected_to
def reset_caches(self):
# Resets the unification data structures.
# In the future, other data may be cached as well that is independent of the
# input CPPs. For example, XKMS results, OCSP queries etc.
self.included_service_specifications_counter = 0
self.included_certificates = {}
self.unify_channels_results = {}
self.unify_channels_exceptions = {}
self.unify_transport_results = {}
self.unify_transport_exceptions = {}
self.unify_payload_profile_results = {}
self.unify_payload_profile_exceptions = {}
self.unify_package_results = {}
self.unify_package_exceptions = {}
self.depends_on = {}
self.included_components = {}
self.shortened = {}
self.collisions = {}
# Delegation context handler.
# By default there are no connections
def default_is_connected_to(self,
frompartyid,
frompartytype,
fromcppid,
service,
action,
direction,
topartyid,
topartyidtype,
topartycppid):
return False
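    # A deployment-specific callable with the same signature as
    # default_is_connected_to can be supplied to the CPABuilder constructor
    # via the delegation_handler parameter; it is consulted in
    # unify_channels_memo to decide whether a DelegationChannel on either
    # side actually connects the two parties for a given service, action
    # and direction.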
def unify(self, acpp, bcpp,
partyrole=None, counterpartyrole=None,
agreementid = None,
requested_activation_date=None,
requested_expiration_date=None,
acpp_url=None,
bcpp_url=None,
handle_defaults=False,
remove_unused_certs=False):
self.reset_caches()
logging.debug('Inline channel features ..')
acpp = self.inline_channel_features(acpp)
bcpp = self.inline_channel_features(bcpp)
cpp_level_acl_check(acpp, bcpp)
if handle_defaults:
logging.debug('Processing CPPs for defaults..')
acpp = self.default_handler(deepcopy(acpp))
bcpp = self.default_handler(deepcopy(bcpp))
prefix_identifiers(acpp,'a_')
prefix_identifiers(bcpp,'b_')
cpa = lxml.etree.Element(cppa('CPA'),
nsmap = self.NSMAP)
acppid, bcppid, activation, expiration = self.unify_profileinfo(cpa, acpp, bcpp,
agreementid,
requested_activation_date,
requested_expiration_date,
acpp_url,
bcpp_url)
self.initialize_partyinfo(cpa, acpp, 'PartyInfo', handle_defaults)
self.initialize_partyinfo(cpa, bcpp, 'CounterPartyInfo', handle_defaults)
logging.info("Unifying {} {}".format(acppid, bcppid))
a_service_specifications = acpp.xpath('cppa:ServiceSpecification',
namespaces=self.NSMAP)
for a_service_specification in a_service_specifications:
logging.info('Processing service specification for {} {}'.format(
(a_service_specification.xpath('child::cppa:PartyRole/@name',
namespaces=_NSMAP))[0],
(a_service_specification.xpath('child::cppa:CounterPartyRole/@name',
namespaces=_NSMAP))[0]
))
try:
self.unify_servicebinding_list(cpa,
acpp,
bcpp,
a_service_specification,
acppid,
bcppid,
partyrole,
counterpartyrole,
activation,
expiration)
except UnificationException as e:
logging.info('Exception in Service Specification: {}'.format(e.value))
if self.included_service_specifications_counter == 0:
# There has to be at least one role pair
# for which there is at least one matching binding
situation = 'No matching service specifications for {}-{}'.format(acppid,
bcppid)
logging.info(situation)
raise UnificationException(situation)
else:
logging.info('Matched {} service specification(s)'.format(
self.included_service_specifications_counter)
)
if 'actionbinding' in self.included_components:
for ab in self.included_components['actionbinding']:
if ab in self.depends_on and 'channel' in self.depends_on[ab]:
for dc in self.depends_on[ab]['channel']:
self.confirm_included('channel', dc)
if ab in self.depends_on and 'payloadprofile' in self.depends_on[ab]:
for pp in self.depends_on[ab]['payloadprofile']:
self.confirm_included('payloadprofile', pp)
if 'channel' in self.included_components:
for ch in self.included_components['channel']:
if ch in self.depends_on and 'channel' in self.depends_on[ch]:
for ch2 in self.depends_on[ch]['channel']:
self.confirm_included('channel', ch2)
for ch in self.included_components['channel']:
cpa.append(self.unify_channels_results[ch])
for ch in self.included_components['channel']:
if ch in self.depends_on and 'transport' in self.depends_on[ch]:
for tid in self.depends_on[ch]['transport']:
self.confirm_included('transport', tid)
if 'transport' in self.included_components:
for tp in self.included_components['transport']:
cpa.append(self.unify_transport_results[tp])
if 'payloadprofile' in self.included_components:
for pp in self.included_components['payloadprofile']:
cpa.append(self.unify_payload_profile_results[pp])
if 'channel' in self.included_components:
for ch in self.included_components['channel']:
if ch in self.depends_on and 'package' in self.depends_on[ch]:
for ppid in self.depends_on[ch]['package']:
(a, b, c, d) = ppid
pp = self.unify_package_results[ppid]
logging.info("Unifying {}-{} {}-{}: {}".format(a, b, c, d, pp.tag))
pp.set('id', self.cppaid(a, b, c, d))
cpa.append(self.unify_package_results[ppid])
if remove_unused_certs:
# first remove any unreferenced trust anchor
cpa = unreferenced_trustanchor_transform(cpa).getroot()
            # then remove any unreferenced cert
            cpa = unreferenced_cert_transform(cpa).getroot()
            # then remove any unreferenced policy set
cpa = unreferenced_policy_set_transform(cpa).getroot()
# then remove unused SSH keys
cpa = unreferenced_ssh_key_transform(cpa).getroot()
return self.c14n(cpa)
def c14n(self, tree):
newtree = lxml.etree.Element(tree.tag, nsmap=self.NSMAP)
newtree.text = tree.text
for att in tree.attrib:
newtree.attrib[att] = tree.attrib[att]
for element in tree:
if element is None:
pass
#elif lxml.etree.iselement(element):
elif type(element) is lxml.etree._Element:
newtree.append(self.c14n(element))
else:
newtree.append(element)
return newtree
def unify_profileinfo(self, cpa, acpp, bcpp, agreementid=None,
requested_activation_date=None,
requested_expiration_date=None,
acpp_url=None,
bccp_url=None):
acppid = acpp.xpath('child::cppa:ProfileInfo/cppa:ProfileIdentifier/text()',
namespaces=self.NSMAP)[0]
bcppid = bcpp.xpath('child::cppa:ProfileInfo/cppa:ProfileIdentifier/text()',
namespaces=self.NSMAP)[0]
agreementinfo = lxml.etree.SubElement(cpa,
cppa('AgreementInfo'))
agreementidel = lxml.etree.SubElement(agreementinfo,
cppa('AgreementIdentifier'))
#agreementid.text = "{}_{}".format(acppid, bcppid)
if agreementid != None:
agreementidel.text = agreementid
else:
agreementidel.text = self.agreementidfun(acpp, bcpp)
agreementdescription = lxml.etree.SubElement(agreementinfo, cppa('Description'))
agreementdescription.text = \
"Agreement formed from {} and {} at {}".format(acppid,
bcppid,
datetime.datetime.now().isoformat())
agreementdescription.set(xml('lang'), 'en')
for (pid, pid_url) in [(acppid, acpp_url),
(bcppid, bccp_url)]:
pid2 = lxml.etree.SubElement(agreementinfo, cppa('ProfileIdentifier'))
pid2.text = pid
if pid_url != None:
pid2.set('href', pid_url)
activation, expiration = self.init_cpa_validity_interval(acpp, bcpp, agreementinfo,
requested_activation_date,
requested_expiration_date)
return acppid, bcppid, activation, expiration
def init_cpa_validity_interval(self, acpp, bcpp, agreementinfo,
requested_activation_date=None,
requested_expiration_date=None):
now = datetime.datetime.now()
try:
aphasein = acpp.xpath('child::cppa:ProfileInfo/cppa:PhaseIn/text()',
namespaces=self.NSMAP)[0]
aduration = isodate.isoduration.parse_duration(aphasein)
except:
aduration = datetime.timedelta(0)
try:
bphasein = bcpp.xpath('child::cppa:ProfileInfo/cppa:PhaseIn/text()',
namespaces=self.NSMAP)[0]
bduration = isodate.isoduration.parse_duration(bphasein)
except:
bduration = datetime.timedelta(0)
if aduration < bduration:
activation = now + bduration
else:
activation = now + aduration
if requested_activation_date != None:
if requested_activation_date > activation:
activation = requested_activation_date
activation = self.init_activation_date(_profileinfo(acpp),
_profileinfo(bcpp),
agreementinfo,
activation)
expiration = self.init_expiration_date(_profileinfo(acpp),
_profileinfo(bcpp),
agreementinfo,
requested_expiration_date,
activation)
return activation, expiration
def init_activation_date(self, aparent, bparent, abparent, requested_activation,
toplevel=True):
activation = None
for parent in [aparent, bparent]:
activationl = parent.xpath(
'child::cppa:ActivationDate/text()',
namespaces=self.NSMAP)
if len(activationl) > 0:
specified_activation = isodate.isodatetime.parse_datetime(activationl[0])
if specified_activation > requested_activation:
activation = specified_activation
if toplevel is False and activation is None:
            # The CPPs did not specify an activation date and we're in an embedded
            # context, so we inherit the top-level activation date and don't
            # create an ActivationDate element.
pass
else:
# we're at top level or there is a specified activation
if activation is None:
activation = requested_activation
activationdate = lxml.etree.SubElement(abparent, cppa('ActivationDate'))
activationdate.text = activation.isoformat()
return activation
def init_expiration_date(self, aparent, bparent, abparent, requested_expiration_date, activation):
expiration = None
for parent in [aparent, bparent]:
expirationL = parent.xpath(
'child::cppa:ExpirationDate/text()',
namespaces=self.NSMAP)
if len(expirationL) > 0:
specified_expiration = isodate.isodatetime.parse_datetime(expirationL[0])
if requested_expiration_date is not None:
if requested_expiration_date < specified_expiration:
specified_expiration = requested_expiration_date
if activation is not None and specified_expiration < activation:
situation = 'Service expires at {} before earliest activation {}'.format(
expirationL[0],
activation.isoformat())
logging.info(situation)
raise UnificationException(situation)
if expiration is None:
expiration = specified_expiration
elif specified_expiration < expiration:
expiration = specified_expiration
        if expiration is not None:
expirationdate = lxml.etree.SubElement(abparent,
cppa('ExpirationDate'))
expirationdate.text = expiration.isoformat()
return expiration
def _agreementidfun(self, acpp, bcpp):
acppid = acpp.xpath(
'child::cppa:ProfileInfo/cppa:ProfileIdentifier/text()',
namespaces=self.NSMAP)[0]
bcppid = bcpp.xpath(
'child::cppa:ProfileInfo/cppa:ProfileIdentifier/text()',
namespaces=self.NSMAP)[0]
return "{}_{}".format(acppid, bcppid)
def initialize_partyinfo(self, cpa, cpp, element, handle_defaults=False):
partyinfo = lxml.etree.SubElement(cpa, cppa(element))
inelement = cpp.xpath('child::cppa:PartyInfo',
namespaces=self.NSMAP)[0]
for pname in inelement.xpath('child::cppa:PartyName',
namespaces= self.NSMAP):
partyinfo.append( deepcopy(pname))
for pid in inelement.xpath('descendant-or-self::cppa:PartyId',
namespaces= self.NSMAP):
partyinfo.append( deepcopy(pid))
for certificate in inelement.xpath('child::cppa:Certificate',
namespaces= self.NSMAP):
partyinfo.append(deepcopy(certificate))
for anchorset in inelement.xpath('child::cppa:TrustAnchorSet',
namespaces= self.NSMAP):
partyinfo.append(deepcopy(anchorset))
for anchorset in inelement.xpath('child::cppa:CertificatePolicySet',
namespaces= self.NSMAP):
partyinfo.append(deepcopy(anchorset))
self.process_default_certificates(inelement, partyinfo, handle_defaults)
for certificate in inelement.xpath('child::cppa:IDPRegistration',
namespaces= self.NSMAP):
partyinfo.append(deepcopy(certificate))
for certificate in inelement.xpath('child::cppa:IDPRegistrationSet',
namespaces= self.NSMAP):
partyinfo.append(deepcopy(certificate))
for ssh_key in inelement.xpath('child::cppa:SSHKey',
namespaces= self.NSMAP):
partyinfo.append(deepcopy(ssh_key))
return partyinfo
def process_default_certificates(self, inelement, partyinfo, handle_defaults=False):
#if not handle_defaults:
certificate_defaults_list = inelement.xpath('child::cppa:CertificateDefaults',
namespaces= self.NSMAP)
if len(certificate_defaults_list) > 0:
partyinfo.append(deepcopy(certificate_defaults_list[0]))
def unify_servicebinding_list(self,
cpa, acpp, bcpp,
a_service_specification,
acppid, bcppid,
partyrole, counterpartyrole,
activation, expiration,
bindings_match_mode='all'):
arole = a_service_specification.xpath('child::cppa:PartyRole/@name',
namespaces=self.NSMAP)[0]
brole = a_service_specification.xpath('child::cppa:CounterPartyRole/@name',
namespaces=self.NSMAP)[0]
if (partyrole is None or partyrole == arole) \
and (counterpartyrole is None or counterpartyrole == brole):
service_specification = lxml.etree.Element(cppa('ServiceSpecification'))
lxml.etree.SubElement(service_specification, cppa('PartyRole'), name=arole)
lxml.etree.SubElement(service_specification, cppa('CounterPartyRole'), name=brole)
ebbp_constraints_list = []
for attribute in ['uuid', 'name', 'version']:
if attribute in a_service_specification.attrib:
value = a_service_specification.get(attribute)
service_specification.set(attribute, value)
ebbp_constraints_list.append('@{}="{}"'.format(attribute, value))
else:
ebbp_constraints_list.append('not(@{})'.format(attribute))
if len(ebbp_constraints_list) > 0:
                ebbp_constraints_list_xp = ' and '.join(ebbp_constraints_list)
xpq = 'cppa:ServiceSpecification[{} and cppa:PartyRole/@name = "{}"' \
' and cppa:CounterPartyRole/@name = "{}"]'
xpq = xpq.format(ebbp_constraints_list_xp, brole, arole)
else:
xpq = 'cppa:ServiceSpecification[cppa:PartyRole/@name = "{}"' \
' and cppa:CounterPartyRole/@name = "{}"]'
xpq = xpq.format(brole, arole)
try:
b_service_specification = bcpp.xpath(xpq,
namespaces=self.NSMAP)[0]
except IndexError:
situation = 'No ServiceSpecification for {} {} in {}'.format(brole,
arole,
bcppid)
logging.info(situation)
if partyrole is not None and counterpartyrole is not None:
"""
We raise an exception if unification was requested for a specific
PartyRole-CounterPartyRole combination.
Otherwise, we assume it can just be ignored.
"""
raise UnificationException(situation)
else:
logging.info('Processing ACL checks for service {} {}'.format(arole,
brole))
service_specification_acl_check(a_service_specification,
acpp,
b_service_specification,
bcpp)
logging.info('Processing service bindings for {} {}'.format(arole, brole))
a_servicebinding_list = a_service_specification.xpath(
'child::cppa:ServiceBinding',
namespaces=self.NSMAP)
included_bindings_counter = 0
last_exception = None
for counter, a_servicebinding in enumerate(a_servicebinding_list, start=1):
try:
logging.info('Service binding #{}'.format(counter))
acpa_servicebinding = self.unify_servicebinding_from_acpp_party(
acppid,
acpp,
bcppid,
bcpp,
arole,
brole,
a_servicebinding,
b_service_specification,
activation,
expiration)
except UnificationException as e:
last_exception = e
if bindings_match_mode == 'all':
logging.info("UnificationException: {}".format(e.value))
logging.info('Bindings match mode {} ' \
'so abandoning service specification'.format(
bindings_match_mode)
)
# @@@ review the following
# #raise
else:
                        logging.info('Bindings match mode {} '
                                     'so ignoring {}'.format(bindings_match_mode,
                                                             e.value))
else:
included_bindings_counter += 1
service_specification.append(acpa_servicebinding)
logging.info('Computed {} service binding(s)'.format(included_bindings_counter))
if included_bindings_counter > 0:
logging.info('Total service bindings is {}'.format(included_bindings_counter))
cpa.append(service_specification)
self.included_service_specifications_counter += 1
else:
situation = 'No Service Bindings matched for {}-{} {}-{}: {}'.format(
acppid,
arole,
bcppid,
brole,
last_exception
)
logging.info(situation)
raise UnificationException(situation)
else:
logging.info("Skipping role {}".format(arole))
def unify_servicebinding_from_acpp_party(self,
acppid,
acpp,
bcppid,
bcpp,
arole,
brole,
a_servicebinding,
b_service_specification,
activation,
expiration):
acpa_servicebinding = lxml.etree.Element(cppa('ServiceBinding'))
aserviceEl = a_servicebinding.xpath('child::cppa:Service',
namespaces=self.NSMAP)[0]
aservice = aserviceEl.text
aservicetype = aserviceEl.get('type')
logging.info("Processing service {} type {}".format(
aservice, aservicetype)
)
acpaservice = lxml.etree.SubElement(acpa_servicebinding,
cppa('Service'))
acpaservice.text = aservice
if aservicetype is not None:
acpaservice.set('type', aservicetype)
if aservicetype is None:
bserviceq = 'child::cppa:ServiceBinding[cppa:Service="{}"]'.format(aservice)
else:
bserviceqt = 'child::cppa:ServiceBinding[cppa:Service[text()="{}" and @type="{}"]]'
bserviceq = bserviceqt.format(aservice, aservicetype)
try:
b_servicebinding = b_service_specification.xpath(bserviceq,
namespaces=self.NSMAP)[0]
except:
raise UnificationException(
'Service {} not found for {} {} in {}'.format(aservice,
brole,
arole,
bcppid))
else:
logging.info(
"Unifying definitions for service {}, type {} in role {}".format(
aservice, aservicetype, arole))
activation = self.init_activation_date(a_servicebinding,
b_servicebinding,
acpa_servicebinding,
activation,
toplevel=False)
expiration = self.init_expiration_date(a_servicebinding,
b_servicebinding,
acpa_servicebinding,
expiration,
activation)
self.unify_servicebinding(acppid, acpp,
bcppid, bcpp,
aservice,
a_servicebinding, b_servicebinding,
acpa_servicebinding)
return acpa_servicebinding
def unify_servicebinding(self, acppid, acpp, bcppid, bcpp, service,
a_servicebinding, b_servicebinding,
servicebinding):
logging.info("Unifying service {} in {} and {}".format(service, acppid, bcppid))
service_binding_acl_check(a_servicebinding, acpp,
b_servicebinding, bcpp)
(identifiers, actions) = self.unify_send_receive(acppid, acpp, bcppid, bcpp, service,
a_servicebinding, b_servicebinding,
servicebinding,
"send", "receive",
action_identifiers =[],
actions=[])
(identifiers2, actions2) = self.unify_send_receive(acppid, acpp, bcppid, bcpp, service,
a_servicebinding, b_servicebinding,
servicebinding,
"receive", "send",
action_identifiers=identifiers,
actions=actions)
logging.info("Unified service binding in {} and {} for {}".format(acppid, bcppid, service))
self.check_b_servicebinding(actions2, b_servicebinding)
for id in identifiers2:
self.confirm_included('actionbinding', id)
def unify_send_receive(self,
acppid, acpp,
bcppid, bcpp,
service,
a_servicebinding, b_servicebinding,
ab_servicebinding,
atype, btype,
action_identifiers = [],
actions = []):
try:
asendbinding_list = a_servicebinding.xpath(
'child::cppa:ActionBinding[@sendOrReceive="{}"]'.format(atype),
namespaces=self.NSMAP)
for a_binding in asendbinding_list:
action = a_binding.get('action')
aid = a_binding.get('id')
direction = a_binding.get('sendOrReceive')
logging.info('Processing Service {}, Action {}'.format(service, action))
actionbinding = lxml.etree.Element(cppa('ActionBinding'),
id=aid, sendOrReceive=atype, action=action)
a_reply_to = a_binding.get('replyTo')
if a_reply_to is not None:
actionbinding.set('replyTo', a_reply_to)
bexpr = 'child::cppa:ActionBinding[@action="{}" and @sendOrReceive="{}"]'.format(
action,
btype)
b_binding_list = b_servicebinding.xpath(bexpr,
namespaces=self.NSMAP)
if len(b_binding_list) == 0:
use = a_binding.get('use')
logging.info(
"No match in {} for {}-{} ({})".format(bcppid, service, action, use))
if use != 'optional':
raise UnificationException(
"No match in {} for {}-{}".format(bcppid, service, action)
)
else:
self.unify_send_receive_to_b_list(
acppid, acpp,
bcppid, bcpp,
service,
action,
a_servicebinding, b_servicebinding, ab_servicebinding,
aid,
a_reply_to,
a_binding, b_binding_list, actionbinding, atype,
direction,
action_identifiers,
actions)
except UnificationException as e:
logging.info("Send_Receive exception: {}".format(e.value))
raise
else:
return action_identifiers, actions
def unify_send_receive_to_b_list(self,
acppid, acpp,
bcppid, bcpp,
service,
action,
a_servicebinding, b_servicebinding, ab_servicebinding,
aid,
a_reply_to,
a_binding, b_binding_list, actionbinding, atype,
direction,
action_identifiers,
actions):
last_exception = ''
for b_binding in b_binding_list:
try:
bid = b_binding.get('id')
logging.info("Unifying {}-{} in {} and {} channels {} - {}".format(service,
action,
acppid,
bcppid,
aid,
bid))
self.check_action_replyto(service, action, a_reply_to, b_binding,
a_servicebinding, b_servicebinding)
try:
logging.info('Checking action binding level ACL')
action_binding_acl_check(a_binding, acpp,
b_binding, bcpp)
except UnificationException as e:
use = a_binding.get('use')
if use == 'optional' or use == None:
logging.info('ACL ignored for optional action binding {}'.format(e.value))
else:
logging.info('Exception for action binding {}'.format(e.value))
raise
else:
(acid, bcid) = self.unify_actionbinding_channels(acppid, acpp,
bcppid, bcpp,
service, action,
aid, a_binding,
bid, b_binding,
actionbinding,
atype)
(appid, bppid) = self.unify_actionbinding_payloadprofiles(acppid, acpp, bcppid, bcpp,
aid, bid,
service, action,
a_binding, b_binding, actionbinding,
direction)
self.unify_properties(aid, acpp, a_binding,
bid, bcpp, b_binding, actionbinding)
action_identifiers.append((acppid, aid, bcppid, bid))
actions.append(action)
logging.info("Successfully unified {}-{}: {} {} {} {} to {} {} {} {}".format(
service,
action,
acppid,
aid,
acid,
appid,
bcppid,
bid,
bcid,
bppid)
)
ab_servicebinding.append(actionbinding)
self.record_dependency((acppid, aid, bcppid, bid),
'channel',
(acppid, acid, bcppid, bcid))
except UnificationException as e:
last_exception = e.value
# See if there is another b_binding on b_binding_list that unifies
else:
# The b_binding unified. Stop the iteration
return
# we only get here if there was no return from any of the b_bindings
raise UnificationException(last_exception)
def check_action_replyto(self, service, action, areplyTo,
b_binding, a_servicebinding, b_servicebinding):
b_reply_to = b_binding.get('replyTo')
if areplyTo is None and b_reply_to is not None \
or areplyTo is not None and b_reply_to is None:
raise UnificationException(
'Bindings {} {} inconsistent for replyTo presence'.format(service,
action))
if areplyTo is not None and b_reply_to is not None:
arsendbinding = a_servicebinding.xpath(
'child::cppa:ActionBinding[@id="{}"]'.format(areplyTo),
namespaces=self.NSMAP)[0]
brsendbinding = b_servicebinding.xpath(
'child::cppa:ActionBinding[@id="{}"]'.format(b_reply_to),
namespaces=self.NSMAP)[0]
araction = arsendbinding.get('action')
braction = brsendbinding.get('action')
if araction != braction:
raise UnificationException(
'Bindings {} replyTo to different actions {} {}'.format(service,
araction,
braction))
def check_b_servicebinding(self, covered_actions, b_servicebinding):
b_abinding_list = b_servicebinding.xpath('child::cppa:ActionBinding',
namespaces=self.NSMAP)
for b_abinding in b_abinding_list:
action = b_abinding.get('action')
if action in covered_actions:
logging.debug('{} found in covered action list'.format(action))
else:
use = b_abinding.get('use')
if use == 'optional':
logging.debug(
'{} not found in covered action list, but it is optional'.format(action)
)
else:
raise UnificationException(
'Required binding for action {} not matched'.format(action))
def unify_actionbinding_payloadprofiles(self, acppid, acpp, bcppid, bcpp, aid, bid,
service, action,
a_binding, b_binding, actionbinding,
direction):
logging.info(
"Unifying payload profiles in action bindings {} and {} for {} -- {}".format(aid, bid, service, action)
)
#appids = a_binding.xpath('@payloadProfileId') + a_binding.xpath('child::cppa:PayloadProfileId/text()',
# namespaces=self.NSMAP)
#bppids = b_binding.xpath('@payloadProfileId') + b_binding.xpath('child::cppa:PayloadProfileId/text()',
# namespaces=self.NSMAP)
appids = a_binding.xpath('child::cppa:PayloadProfileId/text()', namespaces=self.NSMAP)
bppids = b_binding.xpath('child::cppa:PayloadProfileId/text()', namespaces=self.NSMAP)
if appids != [] and bppids != []:
for (acounter, appid) in enumerate(appids):
for (bcounter, bppid) in enumerate(bppids):
logging.info("Attempting to unify {} #{} and {} #{}".format(appid,
acounter,
bppid,
bcounter))
try:
self.unify_payload_profile(acppid, acpp, bcppid, bcpp, appid, bppid, direction)
logging.info(
'Setting attribute to {} for {} {} {} {}'.format(
                                self.cppaid(acppid, appid, bcppid, bppid),
acppid,
appid,
bcppid,
bppid))
except UnificationException as e:
last_exception = e
logging.info("Failure to unify {} #{} and {} #{}: {}".format(appid,
acounter,
bppid,
bcounter,
e.value))
else:
cpa_ppid_el = lxml.etree.SubElement(actionbinding, cppa('PayloadProfileId'))
cpa_ppid_el.text = self.cppaid(acppid, appid, bcppid, bppid)
self.record_dependency((acppid, aid, bcppid, bid),
'payloadprofile',
(acppid, appid, bcppid, bppid))
return (appid, bppid)
            # The following exception is only raised if no matching pair is
            # found, meaning there has been at least one exception.
raise UnificationException(
'Payload profiles {} {} failed to unify: {}'.format(aid,
bid,
last_exception.value))
elif (appids != [] and bppids == []) or (appids == [] and bppids != []):
raise UnificationException('{} and {} inconsistent for payload profiles: {} vs {}'.format(aid,
bid,
appids,
bppids))
else:
return None, None
def unify_actionbinding_channels(self, acppid, acpp, bcppid, bcpp, service, action,
aid, a_binding, bid, b_binding, actionbinding, direction):
logging.info(
"Unifying channels in action bindings {} and {} for {} -- {}".format(aid, bid, service, action)
)
# Channels
achannelids = a_binding.xpath('child::cppa:ChannelId/text()',
namespaces=self.NSMAP)
bchannelids = b_binding.xpath('child::cppa:ChannelId/text()',
namespaces=self.NSMAP)
last_exception = None
for (acounter, acid) in enumerate(achannelids):
for (bcounter, bcid) in enumerate(bchannelids):
logging.info("Attempt to unify {} #{} and {} #{}".format(acid,
acounter,
bcid,
bcounter))
abchannelid = (acppid, acid, bcppid, bcid)
try:
logging.info("Attempting to unify channel {} for {}".format(bcounter,
abchannelid))
self.unify_channels(abchannelid, acpp, bcpp, direction, service, action)
except UnificationException as e:
last_exception = e
logging.info("Failure to unify {} #{} and {} #{}: {}".format(acid,
acounter,
bcid,
bcounter,
e.value))
else:
logging.info("Successfully unified {} #{} and {} #{}".format(acid,
acounter,
bcid,
bcounter))
acpachannelid = lxml.etree.SubElement(actionbinding, cppa('ChannelId'))
acpachannelid.text = self.cppaid(acppid, acid, bcppid, bcid)
return (acid, bcid)
raise UnificationException(
'Action bindings {} {} failed to unify: {}'.format(aid,
bid,
last_exception.value))
def unify_channels(self, abchannelid, acpp, bcpp, direction, service=None, action=None):
(acppid, acid, bcppid, bcid) = abchannelid
cached, result = self.memo(acppid,
acid,
bcppid,
bcid,
self.unify_channels_results,
self.unify_channels_exceptions,
service,
action)
if cached:
            logging.info(
                "Found cached channel for {} {} and {} {} ({} {})".format(
                    acppid, acid, bcppid, bcid, service, action)
            )
return result
try:
result = self.unify_channels_memo(acpp, bcpp, abchannelid, acid, bcid, direction,
service, action)
except UnificationException as e:
logging.info(
"Exception unifying channel for {} {} and {} {}: {}".format(acppid, acid,
bcppid, bcid,
e.value)
)
self.unify_channels_exceptions[acppid,
acid,
bcppid,
bcid] = e
raise
else:
self.unify_channels_results[acppid,
acid,
bcppid,
bcid] = result
logging.info("Unified channel for {} {} and {} {}".format(
acppid, acid, bcppid, bcid)
)
return result
def unify_channels_memo(self, acpp, bcpp, abchannelid,
a_channelid, b_channelid, direction, service=None, action=None):
(acppid, acid, bcppid, bcid) = abchannelid
logging.info("Unifying channel {} {} {} and {} {} {}".format(acppid,
acid,
a_channelid,
bcppid,
bcid,
b_channelid))
context = (acppid, a_channelid, bcppid, b_channelid)
try:
if a_channelid is b_channelid is None:
return context
elif a_channelid is None or b_channelid is None:
raise UnificationException(
'Missing channel {} versus {}'.format(a_channelid,
b_channelid))
else:
a_channel = acpp.xpath(
'descendant::node()[@id="{}"]'.format(a_channelid),
namespaces=self.NSMAP)[0]
b_channel = bcpp.xpath(
'descendant::node()[@id="{}"]'.format(b_channelid),
namespaces=self.NSMAP)[0]
if a_channel.tag == cppa('DelegationChannel') or \
b_channel.tag == cppa('DelegationChannel'):
ab_channel = lxml.etree.Element(cppa('DelegationChannel'),
id = self.cppaid(acppid,
a_channelid,
bcppid,
b_channelid))
if a_channel.tag != cppa('DelegationChannel'):
aparty = acpp.xpath('descendant::cppa:PartyId',
namespaces=_NSMAP)[0]
p1 = aparty.text
p2 = aparty.get('type')
p3 = acppid
else:
p1, p2, p3 = delegated_party_params(a_channel)
for i in a_channel:
ab_channel.append(deepcopy(i))
if b_channel.tag != cppa('DelegationChannel'):
bparty = bcpp.xpath('descendant::cppa:PartyId',
namespaces=_NSMAP)[0]
p4 = bparty.text
p5 = bparty.get('type')
                        p6 = bcppid
else:
p4, p5, p6 = delegated_party_params(b_channel)
two = deepcopy(b_channel)
two[0].tag = cppa('CounterPartyId')
for i in two:
ab_channel.append(i)
if self.is_connected_to(p1,
p2,
p3,
service,
action,
direction,
p4,
p5,
p6):
return ab_channel
else:
raise UnificationException(
'Delegation failed for {} {}'.format(
a_channel.get('id'),
b_channel.get('id'))
)
elif a_channel.tag != b_channel.tag:
raise UnificationException(
'Incompatible channel types {} {}'.format(a_channel.tag,
b_channel.tag))
elif a_channel.tag not in self.channel_handlers:
raise UnificationException(
'Unsupported channel type {} {}'.format(a_channel.tag,
b_channel.tag))
else:
try:
handler = self.channel_handlers[a_channel.tag]
ab_channel = lxml.etree.Element(a_channel.tag)
abdxid = self.cppaid(acppid, a_channelid, bcppid, b_channelid)
ab_channel.set('id', abdxid)
return handler(acpp,
bcpp,
context,
a_channel,
b_channel,
ab_channel,
direction)
except UnificationException as e:
raise UnificationException(
'Mismatch in channel for protocol {}: {}'.format(a_channel.tag,
e.value))
except UnificationException as e:
te = 'Channel unification exception for {}: {}'.format(abchannelid, e.value)
raise UnificationException(te)
"""
Channel Bindings
"""
def unify_channel_descriptions(self, context, a_channel, b_channel, binding):
(acppid, axid, bcppid, bxid) = context
description = lxml.etree.SubElement(binding, cppa('Description'))
description.text = 'Channel formed from {}{} in {} and {}{} in {}'.format(
a_channel.get('id'),
get_description_value_if_present(a_channel),
acppid,
b_channel.get('id'),
get_description_value_if_present(b_channel),
bcppid)
description.set(xml('lang'), 'en')
def unify_channel_max_size(self, a_channel, b_channel, ab_channel):
self.unify_size_element(a_channel, b_channel, ab_channel, 'MaxSize')
def unify_size_element(self, a_channel, b_channel, ab_channel, element_name):
xpath_expression = 'child::cppa:{}'.format(element_name)
logging.debug('Querying for {}'.format(xpath_expression))
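        # The smaller of the two declared sizes wins; an unspecified size is treated
        # as 0, in which case no element is added to the unified channel. For example,
        # a 10000000 byte limit on one side and 5000000 on the other yields 5000000.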
a_max_size = _apply_units(
a_channel.xpath(xpath_expression,
namespaces=self.NSMAP)
)
b_max_size = _apply_units(
b_channel.xpath(xpath_expression,
namespaces=self.NSMAP)
)
if a_max_size == b_max_size == 0:
logging.debug('a_max_size == b_max_size == 0')
else:
if a_max_size > b_max_size:
ab_max_size = b_max_size
else:
ab_max_size = a_max_size
if ab_max_size > 0:
ab_max_size_el = lxml.etree.SubElement(ab_channel,cppa(element_name))
ab_max_size_el.text = str(ab_max_size)
logging.info(
"Unifying {}, {} {} {}".format(element_name, a_max_size, b_max_size, ab_max_size))
else:
logging.info( "Not reporting 0 size " )
"""
Named Channel
"""
def unify_named_channel(self,
acpp,
bcpp,
context,
a_channel,
b_channel,
binding,
direction):
(acppid, axid, bcppid, bxid) = context
self.unify_channel_descriptions(context, a_channel, b_channel, binding)
self.unify_channel_max_size(a_channel, b_channel, binding)
self.unify_simple_subelement(a_channel, b_channel, binding,
'cppa', 'ChannelName')
self.resolve_certificate_ref(acpp, bcpp, context,
'cppa:SigningCertificateRef',
a_channel, b_channel, binding, direction, 'send')
self.resolve_certificate_ref(acpp, bcpp, context,
'cppa:EncryptionCertificateRef',
a_channel, b_channel, binding, direction, 'receive')
self.unify_signing_cert_and_anchor(acppid, acpp, bcppid, bcpp,
a_channel, b_channel, binding, direction)
self.unify_encryption_cert_and_anchor(acppid, acpp, bcppid, bcpp,
a_channel, b_channel, binding, direction)
a_transport_id = a_channel.get('transport')
b_transport_id = b_channel.get('transport')
self.unify_transport(acppid, acpp,
bcppid, bcpp,
a_transport_id, b_transport_id,
direction)
self.record_dependency(context, 'transport', (acppid, a_transport_id,
bcppid, b_transport_id))
ab_transport_id = self.cppaid(acppid,
a_transport_id,
bcppid,
b_transport_id)
binding.set('transport', ab_transport_id)
# to do: match Params
return binding
"""
EDIINT
"""
def unify_ediint_channel(self, acpp, bcpp, context,
a_channel, b_channel, binding, direction):
(acppid, acid, bcppid, bcid) = context
self.unify_as_response(acid, a_channel, bcid, b_channel, binding)
self.unify_channel_descriptions(context, a_channel, b_channel, binding)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'Signature',
self.unify_signature, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'Encryption',
self.unify_encryption, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'ReceiptHandling',
self.unify_receipt_handling, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'Compression',
self.unify_compression, direction)
self.unify_transport_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, binding, direction)
self.unify_package_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, binding, direction)
return binding
"""
ebMS2
"""
def unify_ebms2_channel(self, acpp, bcpp, context,
a_channel, b_channel, binding, direction):
(acppid, acid, bcppid, bcid) = context
logging.info("Unifying ebMS2Channel for {} {}".format(acid, bcid))
self.unify_as_response(acid, a_channel, bcid, b_channel, binding)
self.unify_channel_descriptions(context, a_channel, b_channel, binding)
self.unify_transport_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, binding, direction)
self.unify_package_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, binding, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'ErrorHandling',
self.unify_error_handling, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'ReceiptHandling',
self.unify_receipt_handling, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'ebMS2ReliableMessaging',
self.unify_ebms2_reliable_messaging, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'ebMS2SecurityBinding',
self.unify_ebms2_security_binding, direction)
return binding
def unify_ebms2_reliable_messaging(self, acpp, bcpp, context,
ael, bel, abel, direction):
unify_atts(ael, bel, abel, strictatts=True)
self.unify_complex_subelement(acpp, bcpp, context, ael, bel, abel,
'cppa', 'DuplicateHandling',
self.unify_duplicate_handling)
self.unify_persist_duration(acpp, bcpp, context, ael, bel, abel, direction)
self.unify_retry_handling(acpp, bcpp, context, ael, bel, abel, direction)
def unify_ebms2_security_binding(self, acpp, bcpp, context,
asec, bsec, security, direction):
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'Signature',
self.unify_signature, direction)
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'Encryption',
self.unify_encryption, direction)
"""
Web Services
"""
def unify_ws_channel(self, acpp, bcpp, context,
a_channel, b_channel, binding, direction):
(acppid, axid, bcppid, bxid) = context
self.unify_channel_descriptions(context, a_channel, b_channel, binding)
self.unify_simple_subelement(a_channel, b_channel, binding, 'cppa', 'SOAPVersion',
required=False)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'Addressing',
self.unify_ws_addressing, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'WSSecurityBinding',
self.unify_ws_security, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'WSReliableMessagingBinding',
self.unify_ws_reliable_messaging, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'ReceiptHandling',
self.unify_receipt_handling, direction)
return binding
"""
AMQP Channel
"""
def unify_amqp_channel(self, acpp, bcpp, context,
a_channel, b_channel, binding, direction):
(acppid, acid, bcppid, bcid) = context
self.unify_as_response(acid, a_channel, bcid, b_channel, binding)
self.unify_channel_descriptions(context, a_channel, b_channel, binding)
self.unify_transport_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, binding, direction)
self.unify_package_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, binding, direction)
return binding
"""
Delegation Channels
"""
def unify_delegation_channel(self, acpp, bcpp, context,
a_channel, b_channel, binding, direction):
self.unify_simple_subelement(a_channel, b_channel, binding, 'cppa', 'PartyId',
required=False)
self.unify_simple_subelement(a_channel, b_channel, binding, 'cppa', 'ProfileIdentifier',
required=False)
return binding
"""
Transport Channels
"""
def unify_transport_channel(self, acpp, bcpp, context,
a_channel, b_channel, binding, direction):
(acppid, acid, bcppid, bcid) = context
self.unify_transport_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, binding, direction)
self.unify_channel_descriptions(context, a_channel, b_channel, binding)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, binding,
'cppa', 'RequestChannelID',
self.unify_request_channel_id, direction)
return binding
def unify_request_channel_id(self, acpp, bcpp, context,
arerc, brerc, abrerc, direction):
(acppid, axid, bcppid, bxid) = context
logging.info('unify_RequestChannelId for {} {}'.format(context, arerc))
arercid = arerc.text
brercid = brerc.text
self.unify_channels((acppid, arercid, bcppid, brercid), acpp, bcpp, None)
logging.info("Unified RequestChannelId {} {}".format(arercid, brercid))
abrerc.text = self.cppaid(acppid, arercid, bcppid, brercid)
self.record_dependency(context, 'channel', (acppid, arercid, bcppid, brercid))
"""
ebMS3 and AS4
"""
def unify_ebms3_channel(self, acpp, bcpp, context,
a_channel, b_channel, ebmsbinding,
direction):
(acppid, acid, bcppid, bcid) = context
unify_and_set_att(a_channel, b_channel, ebmsbinding, 'actorOrRole')
self.unify_as_response(acid, a_channel, bcid, b_channel, ebmsbinding)
self.unify_channel_descriptions(context, a_channel, b_channel, ebmsbinding)
self.unify_simple_subelement(a_channel, b_channel, ebmsbinding,
'cppa', 'ChannelProfile', required=False)
logging.info("Unifying ebMS3Channel for {} {}".format(acid, bcid))
self.unify_mpc(context, a_channel, b_channel, ebmsbinding, direction)
self.unify_simple_subelement(a_channel, b_channel, ebmsbinding,
'cppa', 'SOAPVersion', required=False)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'Addressing',
self.unify_ws_addressing, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'WSSecurityBinding',
self.unify_ws_security, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'AS4ReceptionAwareness',
self.unify_as4_reception_awareness, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'ErrorHandling',
self.unify_error_handling, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'ReceiptHandling',
self.unify_receipt_handling, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'PullHandling',
self.unify_pull_handling, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'Compression',
self.unify_compression, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'Bundling',
self.unify_bundling, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'Splitting',
self.unify_splitting, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_channel, b_channel, ebmsbinding,
'cppa', 'AlternateChannel',
self.unify_alternate_channel_id, direction)
self.unify_transport_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, ebmsbinding, direction)
self.unify_package_elements(acppid, acpp, bcppid, bcpp, a_channel, b_channel,
context, ebmsbinding, direction)
logging.info("Unified ebMS3Channel for {} {}".format(acid, bcid))
return ebmsbinding
def unify_mpc(self, context, a_channel, b_channel, ebmsbinding, direction):
(acppid, acid, bcppid, bcid) = context
logging.info('Unify MPC for {}, {}'.format(acid, bcid))
for mpcatt in ['mpc', 'submpcext']:
ampc = a_channel.get(mpcatt)
bmpc = b_channel.get(mpcatt)
a_as_response = a_channel.get('asResponse')
b_as_response = b_channel.get('asResponse')
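            # When only one side specifies an MPC, it must be the side that serves the
            # pull request (asResponse is not false); a pull client cannot impose an
            # MPC on the pulled server.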
if ampc == bmpc is None:
pass
elif ampc == bmpc:
# MPC specified on both sides with same value: reuse value
ebmsbinding.set(mpcatt, ampc)
elif ampc is not None and bmpc is not None and ampc != bmpc:
# MPC specified on both sides with conflicting value: unification fails
raise UnificationException(
'Incompatible (sub)MPC {}-{} versus {}-{}'.format(acid,
ampc,
bcid,
bmpc))
else:
# The MPC is specified on one side only
logging.info(' {} {} {}'.format(a_as_response, b_as_response, direction))
if direction == 'send' and xsd_boolean(a_as_response) is not False:
# Message is transmitted over backchannel of a Pull request
if ampc is not None and bmpc is None:
ebmsbinding.set(mpcatt, ampc)
else:
raise UnificationException(
'Pull client cannot set (sub)MPC for server {} {}'.format(acid,
bcid))
elif direction == 'receive' and xsd_boolean(b_as_response) is not False:
# Message is transmitted over backchannel of a Pull request
if ampc is None and bmpc is not None:
ebmsbinding.set(mpcatt, bmpc)
else:
raise UnificationException(
'Pull client cannot set MPC for server {} {}'.format(acid,
bcid))
def unify_as_response(self, aid, a_binding, bid, b_binding, ab_binding):
aval = a_binding.get('asResponse')
bval = b_binding.get('asResponse')
if xsd_boolean(aval) is xsd_boolean(bval):
# both are set with the same Boolean (true or false) value or both are None
if xsd_boolean(aval) is True:
# they are set to true
ab_binding.set('asResponse', 'true')
else:
# both are False or None
                # do nothing; the default is false
pass
elif xsd_boolean(aval) is True:
if xsd_boolean(bval) is None:
# A is set and is set to true, B is not set
ab_binding.set('asResponse', 'true')
else:
# B is False
raise UnificationException(
'Channels {} {} inconsistent for asResponse'.format(aid,
bid))
elif xsd_boolean(bval) is True:
if xsd_boolean(aval) is None:
# B is set and is set to true, A is not set
ab_binding.set('asResponse', 'true')
else:
# A is False
raise UnificationException(
'Channels {} {} inconsistent for asResponse'.format(aid,
bid))
"""
WS-Addressing, for ebMS3 Part 2 multihop feature
"""
def unify_ws_addressing(self, acpp, bcpp, context,
a_addressing, b_addressing, ab_addressing, direction):
self.unify_simple_subelement(a_addressing, b_addressing, ab_addressing,
'cppa', 'Endpoint', required=False)
self.unify_simple_subelement(a_addressing, b_addressing, ab_addressing,
'cppa', 'Action', required=True, strictatts=True)
self.unify_wsa_from(acpp, bcpp, context, a_addressing, b_addressing, ab_addressing, direction)
self.unify_complex_subelement(acpp, bcpp, context, a_addressing, b_addressing, ab_addressing,
'cppa', 'ebMS3InferredRoutingInput',
self.unify_routinginput, direction)
def unify_routinginput(self, acpp, bcpp, context,
a_addressing, b_addressing, ab_addressing, direction):
self.unify_simple_subelement(a_addressing, b_addressing, ab_addressing,
'cppa', 'ActionSuffix', required=False)
self.unify_simple_subelement(a_addressing, b_addressing, ab_addressing,
'cppa', 'MPCSuffix', required=False)
def unify_wsa_from(self, acpp, bcpp, context,
a_addressing, b_addressing, ab_addressing, direction):
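        # The wsa:From element is supplied by the sending side: A when direction is
        # 'send', B otherwise.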
if direction == 'send':
wsa_from_L = a_addressing.xpath('child::cppa:From',
namespaces=self.NSMAP)
else:
wsa_from_L = b_addressing.xpath('child::cppa:From',
namespaces=self.NSMAP)
if len(wsa_from_L) > 0:
ab_addressing.append(deepcopy(wsa_from_L[0]))
"""
Compression
"""
def unify_compression(self, acpp, bcpp, context,
a_compression, b_compression, ab_compression, direction):
self.unify_simple_subelement(a_compression, b_compression, ab_compression,
'cppa', 'CompressionAlgorithm',
intersectifmultiple=False, strictelements=False)
self.unify_boolean_subelement(a_compression, b_compression, ab_compression,
'cppa', 'CompressAttachments', required=False,
strictelements=True)
self.unify_boolean_subelement(a_compression, b_compression, ab_compression,
'cppa', 'CompressExternalPayloads', required=False,
strictelements=True)
"""
WS-Security
"""
def unify_ws_security(self, acpp, bcpp, context,
asec, bsec, security, direction):
#(acppid, axid, bcppid, bxid) = context
self.unify_simple_subelement(asec, bsec,security, 'cppa', 'WSSVersion', required=False)
self.unify_simple_subelement(asec, bsec,security, 'cppa', 'SecurityPolicy', required=False)
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'SAMLKeyConfirmedSubjectToken',
self.unify_saml_key_confirmed_subject, direction)
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'Signature',
self.unify_signature, direction)
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'Encryption',
self.unify_encryption, direction)
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'UserAuthentication',
self.unify_user_authentication, direction)
def unify_saml_key_confirmed_subject(self, acpp, bcpp, context,
asec, bsec, security, direction):
(acppid, axid, bcppid, bxid) = context
security.set('id', '{}_{}'.format(asec.get('id'), bsec.get('id')))
self.unify_simple_subelement(asec, bsec,security, 'cppa', 'SAMLVersion',
required=True)
self.unify_idp_registration_set_ref(acpp, bcpp, context,
asec, bsec, security, direction)
self.include_sender_signing_certificate_ref(acpp, bcpp, context,
asec, bsec, security, direction)
self.unify_saml_attributes(acpp, bcpp, context,
asec, bsec, security, direction)
self.unify_simple_subelement(asec, bsec,security, 'cppa', 'KeyType',
required=True)
def unify_saml_attributes(self, acpp, bcpp, context,
acppel, bcppel, abcpael, direction):
        if direction == 'send':
receiver_attributeL = bcppel.xpath(
'child::cppa:SAMLAttribute',
namespaces=self.NSMAP
)
sender_attributeL = acppel.xpath(
'child::cppa:SAMLAttribute',
namespaces=self.NSMAP
)
else:
receiver_attributeL = acppel.xpath(
'child::cppa:SAMLAttribute',
namespaces=self.NSMAP
)
sender_attributeL = bcppel.xpath(
'child::cppa:SAMLAttribute',
namespaces=self.NSMAP
)
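        # Every attribute the receiver marks use="required" must also be offered as
        # required by the sender; otherwise unification fails.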
for receiver_attribute in receiver_attributeL:
attributename = receiver_attribute.get('Name')
sender_receiver_match = False
for sender_attribute in sender_attributeL:
if sender_attribute.get('Name') == attributename \
and sender_attribute.get('use') == 'required':
sender_receiver_match = True
if receiver_attribute.get('use') == 'required' \
and sender_receiver_match is False:
raise UnificationException(
'Required attribute {} not provided by sender'.format(attributename)
)
def include_sender_signing_certificate_ref(self, acpp, bcpp, context,
acppel, bcppel, abcpael, direction):
if direction == 'send':
signing_certL = acppel.xpath('child::cppa:SigningCertificateRef',
namespaces=self.NSMAP)
else:
signing_certL = bcppel.xpath('child::cppa:SigningCertificateRef',
namespaces=self.NSMAP)
if len(signing_certL) > 0:
abcpael.append(deepcopy(signing_certL[0]))
def unify_idp_registration_set_ref(self, acpp, bcpp, context,
acppel, bcppel, abcpael, direction):
a_idp_set_refL = acppel.xpath('child::cppa:IDPRegistrationSetRef',
namespaces=self.NSMAP)
b_idp_set_refL = bcppel.xpath('child::cppa:IDPRegistrationSetRef',
namespaces=self.NSMAP)
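        # Intersect the two referenced IDP registration sets on ProviderID. The first
        # provider common to both sets is recorded in the CPA; an empty intersection
        # raises a UnificationException.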
if len(a_idp_set_refL) > 0:
a_idprs_ref = (a_idp_set_refL[0]).get('idpsetid')
a_idprs = acpp.xpath(
'descendant::cppa:IDPRegistrationSet[@id="{}"]/cppa:IDPRegistrationRef/@idp'.format(
a_idprs_ref),
namespaces=self.NSMAP)
if len(b_idp_set_refL) > 0:
b_idprs_ref = (b_idp_set_refL[0]).get('idpsetid')
b_idprs = bcpp.xpath(
'descendant::cppa:IDPRegistrationSet[@id="{}"]/cppa:IDPRegistrationRef/@idp'.format(
b_idprs_ref),
namespaces=self.NSMAP)
abcpael.append(deepcopy(b_idp_set_refL[0]))
idp_sets_intersect = False
for aidp in a_idprs:
a_provider = acpp.xpath(
'descendant::cppa:IDPRegistration[@id="{}"]/cppa:ProviderID/text()'.format(aidp),
namespaces=self.NSMAP)[0]
logging.info('P: {}'.format(a_provider))
for bidp in b_idprs:
b_provider = bcpp.xpath(
'descendant::cppa:IDPRegistration[@id="{}"]/cppa:ProviderID/text()'.format(bidp),
namespaces=self.NSMAP)[0]
if a_provider == b_provider:
if not idp_sets_intersect:
idp_sets_intersect = True
provideridel = lxml.etree.SubElement(abcpael, cppa('ProviderID'))
provideridel.text = a_provider
else:
                    logging.info('AIDP {} not found in B'.format(aidp))
if not idp_sets_intersect:
raise UnificationException(
'Empty intersection of IDP sets {} and {}'.format(a_idprs_ref,
b_idprs_ref))
def unify_signature(self, acpp, bcpp, context,
asec, bsec, security, direction):
(acppid, axid, bcppid, bxid) = context
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'SignatureFormat', required=False,
intersectifmultiple=False, strictelements=False)
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'SignatureAlgorithm', required=False,
intersectifmultiple=False, strictelements=False)
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'DigestAlgorithm', required=False,
intersectifmultiple=False, strictelements=False)
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'CanonicalizationMethod', required=False,
intersectifmultiple=False, strictelements=False)
self.resolve_certificate_ref(acpp, bcpp, context,
'cppa:SigningCertificateRef',
asec, bsec, security, direction, 'send')
self.unify_signing_cert_and_anchor(acppid, acpp, bcppid, bcpp,
asec, bsec, security, direction)
self.unify_saml_token_ref(acpp, bcpp, context, asec, bsec, security, direction)
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'SignElements',
self.unify_sign_elements, direction)
self.unify_boolean_subelement(asec, bsec,security,
'cppa', 'SignAttachments', required=False,
strictelements=True)
self.unify_boolean_subelement(asec, bsec,security,
'cppa', 'SignExternalPayloads',
required=False, strictelements=True)
def resolve_certificate_ref(self, acpp, bcpp, context, certificate_kind,
asec, bsec, security, direction, leading_direction):
xpath_local = 'child::{}'.format(certificate_kind)
xpath_default = 'descendant::cppa:CertificateDefaults/{}'.format(certificate_kind)
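        # Prefer a certificate reference local to the security element of the leading
        # party; otherwise fall back to that party's CertificateDefaults, copying the
        # default into both the CPA element and the source CPP element.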
if direction == leading_direction:
local_certificate_list = asec.xpath(xpath_local,
namespaces=self.NSMAP)
default_certificate_list = acpp.xpath(xpath_default,
namespaces=self.NSMAP)
to_append_to = asec
else:
local_certificate_list = bsec.xpath(xpath_local,
namespaces=self.NSMAP)
default_certificate_list = bcpp.xpath(
xpath_default,
namespaces=self.NSMAP)
to_append_to = bsec
if len(local_certificate_list) > 0:
security.append(deepcopy(local_certificate_list[0]))
elif len(default_certificate_list) > 0:
security.append(deepcopy(default_certificate_list[0]))
to_append_to.append(deepcopy(default_certificate_list[0]))
def unify_saml_token_ref(self, acpp, bcpp, context,
acppel, bcppel, abcpael, direction):
a_saml_tokenL = acppel.xpath('child::cppa:SAMLTokenRef',
namespaces=self.NSMAP)
b_saml_tokenL = bcppel.xpath('child::cppa:SAMLTokenRef',
namespaces=self.NSMAP)
if len(a_saml_tokenL) != len(b_saml_tokenL):
raise UnificationException('Mismatch in child count for SAMLTokenRef')
elif len(a_saml_tokenL) == 1:
a_saml_token_id = (a_saml_tokenL[0]).get('tokenId')
b_saml_token_id = (b_saml_tokenL[0]).get('tokenId')
lxml.etree.SubElement(abcpael, cppa('SAMLTokenRef'),
tokenId= '{}_{}'.format(a_saml_token_id,
b_saml_token_id))
def unify_sign_elements(self, acpp, bcpp, context,
asec, bsec, security, direction):
self.unify_expressions(asec, bsec, security, context)
def unify_encryption(self, acpp, bcpp, context,
asec, bsec, security, direction):
(acppid, axid, bcppid, bxid) = context
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'KeyEncryption',
self.unify_key_encryption, direction)
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'EncryptionAlgorithm',
required=False,
intersectifmultiple=True, strictelements=False)
self.unify_complex_subelement(acpp, bcpp, context, asec, bsec, security,
'cppa', 'EncryptElements',
self.unify_encrypt_elements, direction)
self.unify_boolean_subelement(asec, bsec,security,
'cppa', 'EncryptAttachments', required=False,
strictelements=True)
self.unify_boolean_subelement(asec, bsec,security,
'cppa', 'EncryptExternalPayloads', required=False,
strictelements=True)
self.resolve_certificate_ref(acpp, bcpp, context,
'cppa:EncryptionCertificateRef',
asec, bsec, security, direction, 'receive')
self.unify_encryption_cert_and_anchor(acppid, acpp, bcppid, bcpp, asec, bsec,
security, direction)
def unify_key_encryption(self, acpp, bcpp, context,
asec, bsec, security, direction):
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'EncryptionAlgorithm',
required=False,
intersectifmultiple=False, strictelements=False)
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'MaskGenerationFunction',
required=False,
intersectifmultiple=False, strictelements=False)
self.unify_simple_subelement(asec, bsec,security,
'cppa', 'DigestAlgorithm',
required=False,
intersectifmultiple=False, strictelements=False)
def unify_encrypt_elements(self, acpp, bcpp, context,
asec, bsec, security, direction):
self.unify_expressions(asec, bsec, security, context)
def unify_expressions(self, asec, bsec, security, context):
(acppid, axid, bcppid, bxid) = context
a_expressions_list = sorted(asec.xpath('child::cppa:Expression/text()',
namespaces=self.NSMAP))
b_expressions_list = sorted(bsec.xpath('child::cppa:Expression/text()',
namespaces=self.NSMAP))
if len(a_expressions_list) != len(b_expressions_list):
            raise UnificationException(
                'Unequal number of expressions in {} {}'.format(acppid,
                                                                bcppid))
else:
for counter, a_expr in enumerate(a_expressions_list):
b_expr = b_expressions_list[counter]
if a_expr != b_expr:
raise UnificationException(
'Mismatch in expression: {} {}'.format(a_expr,
b_expr))
else:
expression = lxml.etree.SubElement(security, cppa('Expression'))
expression.text = a_expr
"""
AMQP Security
"""
def unify_amqp_security(self, acpp, bcpp, context,
a_amqp_security, b_amqp_security, ab_amqp_security, direction):
self.unify_simple_subelement(a_amqp_security, b_amqp_security, ab_amqp_security, 'cppa', 'SASLMechanism',
required=True, strictelements=False)
self.unify_complex_subelement(acpp, bcpp, context,
a_amqp_security, b_amqp_security, ab_amqp_security,
'cppa', 'TransportLayerSecurity',
self.unify_transport_layer_security, direction)
"""
Reliable Messaging
"""
def unify_as4_reception_awareness(self, acpp, bcpp, context,
ael, bel, abel, direction):
self.unify_complex_subelement(acpp, bcpp, context,
ael, bel, abel,
'cppa', 'DuplicateHandling',
self.unify_duplicate_handling,
direction)
self.unify_retry_handling(acpp, bcpp, context,
ael, bel, abel, direction)
def unify_duplicate_handling(self, acpp, bcpp, context,
ael, bel, abel, direction):
(acppid, axid, bcppid, bxid) = context
self.unify_boolean_subelement(ael, bel,abel,
'cppa', 'DuplicateElimination',
required=True, strictelements=False)
self.unify_persist_duration(acpp, bcpp, context,
ael, bel, abel, direction)
def unify_persist_duration(self, acpp, bcpp, context,
ael, bel, abel, direction):
"""
The PersistDuration of the receiver is used
"""
(acppid, axid, bcppid, bxid) = context
if direction == "send":
self.unify_persist_duration_send(acppid, bcppid, ael, bel, abel)
else:
self.unify_persist_duration_send(acppid, bcppid, bel, ael, abel)
def unify_persist_duration_send(self, acppid, bcppid, ael, bel, abel):
b_persistdurationl = bel.xpath('child::cppa:PersistDuration',
namespaces=self.NSMAP)
if len(b_persistdurationl) > 0:
abel.append(self.c14n(deepcopy(b_persistdurationl[0])))
def unify_retry_handling(self, acpp, bcpp, context,
ael, bel, abel, direction):
"""
Retries are handled by the sender, so the configuration for the CPA is
based on the configuration of the sender.
@@@ for consideration: add checks that the last possible retry is within
the persist duration interval
"""
(acppid, axid, bcppid, bxid) = context
if direction == "send":
self.unify_retry_handling_send(acppid, bcppid, ael, bel, abel)
else:
self.unify_retry_handling_send(acppid, bcppid, bel, ael, abel)
def unify_retry_handling_send(self, acppid, bcppid, ael, bel, abel):
a_RetryHandlingL = ael.xpath('child::cppa:RetryHandling',
namespaces=self.NSMAP)
if len(a_RetryHandlingL) > 0:
            abel.append(self.c14n(deepcopy(a_RetryHandlingL[0])))
def unify_ws_reliable_messaging(self, acpp, bcpp, context,
ael, bel, parent, direction):
self.unify_complex_subelement(acpp, bcpp, context,
ael, bel, parent,
'cppa', 'DuplicateHandling',
self.unify_duplicate_handling)
self.unify_retry_handling(acpp, bcpp, context,
ael, bel, parent, direction)
self.unify_simple_subelement(ael, bel, parent,
'cppa', 'Protocol',
required = True,
strictelements=False)
self.unify_boolean_subelement(ael, bel, parent,
'cppa', 'AckOnDelivery',
required = False,
strictelements=False)
self.unify_boolean_subelement(ael, bel, parent,
'cppa', 'InOrder',
required = False,
strictelements=False)
self.unify_boolean_subelement(ael, bel, parent,
'cppa', 'StartGroup',
required = False,
strictelements=False)
self.unify_complex_subelement(acpp, bcpp, context,
ael, bel, parent,
'cppa', 'Correlation',
self.unify_rm_correlation,
direction)
self.unify_boolean_subelement(ael, bel, parent,
'cppa', 'TerminateGroup',
required = False,
strictelements=False)
def unify_rm_correlation(self, acpp, bcpp, context,
ael, bel, abel, direction):
self.unify_expressions(ael, bel, abel, context)
"""
Error Handling
"""
def unify_error_handling(self, acpp, bcpp, context,
ael, bel, parent, direction):
logging.info("Unifying ErrorHandling for {}".format(ael))
self.unify_boolean_subelement(ael, bel, parent,
'cppa', 'DeliveryFailuresNotifyProducer',
required = False,
strictelements=False)
self.unify_boolean_subelement(ael, bel, parent,
'cppa', 'ProcessErrorNotifyConsumer',
required = False,
strictelements = False)
self.unify_boolean_subelement(ael, bel, parent,
'cppa', 'ProcessErrorNotifyProducer',
required=False,
strictelements=False)
self.unify_complex_subelement(acpp, bcpp, context,
ael, bel, parent,
'cppa', 'ReceiverErrorsReportChannelId',
self.unify_receiver_errors_report_channel_id,
reverse(direction))
def unify_receiver_errors_report_channel_id(self, acpp, bcpp, context,
arerc, brerc, abrerc, direction):
(acppid, axid, bcppid, bxid) = context
logging.info(
'unify_ReceiverErrorsReportChannelId for {} {}'.format(
context, arerc)
)
arercid = arerc.text
brercid = brerc.text
logging.info(
"Attempting to unify ReceiverErrorsReport channels {} with {}".format(
arercid, brercid)
)
self.unify_channels((acppid, arercid, bcppid, brercid), acpp, bcpp, direction)
logging.info("Unified ReceiverErrorsReportChannelId {} {}".format(
arercid, brercid)
)
abrerc.text = self.cppaid(acppid, arercid, bcppid, brercid)
self.record_dependency(context, 'channel', (acppid, arercid, bcppid, brercid))
"""
Receipt Handling
"""
def unify_receipt_handling(self, acpp, bcpp, context,
ael, bel, parent, direction):
logging.info("Unifying ReceiptHandling for {}".format(ael))
self.unify_simple_subelement(ael, bel, parent,
'cppa', 'ReceiptFormat',
required=False, strictelements=True,
intersectifmultiple=False)
self.unify_complex_subelement(acpp, bcpp, context, ael, bel, parent,
'cppa', 'ReceiptChannelId',
self.unify_receipt_channel_id, direction)
def unify_receipt_channel_id(self, acpp, bcpp, context,
arerc, brerc, abrerc, direction):
(acppid, axid, bcppid, bxid) = context
logging.info('unify_ReceiptChannelId for {} {}'.format(context, arerc))
arercid = arerc.text
brercid = brerc.text
self.unify_channels((acppid, arercid, bcppid, brercid), acpp, bcpp, reverse(direction))
logging.info("Unified ReceiptChannelId {} {}".format(arercid, brercid))
abrerc.text = self.cppaid(acppid, arercid, bcppid, brercid)
self.record_dependency(context, 'channel', (acppid, arercid, bcppid, brercid))
"""
self.unify_pull_handling
"""
def unify_pull_handling(self, acpp, bcpp, context,
ael, bel, abel, direction):
(acppid, axid, bcppid, bxid) = context
logging.info('unify_pull_channel_id for {} {}'.format(context, ael))
self.unify_complex_subelement(acpp, bcpp, context, ael, bel, abel,
'cppa', 'PullChannelId',
self.unify_pull_channel_id, direction)
"""
self.unify_pull_channel_id
"""
def unify_pull_channel_id(self, acpp, bcpp, context,
ael, bel, abel, direction):
(acppid, axid, bcppid, bxid) = context
logging.info('unify_pull_channel_id for {} {}'.format(context, ael))
apullchannelid = ael.text
bpullchannelid = bel.text
logging.info(
"Attempting to unify Pull channels {} and {}".format(apullchannelid,
bpullchannelid)
)
self.unify_channels((acppid, apullchannelid, bcppid, bpullchannelid),
acpp, bcpp, reverse(direction))
logging.info("Unified PullChannelId {} {}".format(apullchannelid,
bpullchannelid))
abel.text = self.cppaid(acppid, apullchannelid, bcppid, bpullchannelid)
self.record_dependency(context, 'channel', (acppid,
apullchannelid,
bcppid,
bpullchannelid))
"""
self.unify_bundling
"""
def unify_bundling(self, acpp, bcpp, context,
ael, bel, abel, direction):
(acppid, axid, bcppid, bxid) = context
self.unify_complex_subelement(acpp, bcpp, context, ael, bel, abel,
'cppa', 'Ordering',
self.unify_bundling_ordering, direction)
def unify_bundling_ordering(self, acpp, bcpp, context,
ael, bel, abel, direction):
self.unify_simple_subelement(ael, bel, abel,
'cppa', 'Policy',
required=True)
"""
self.unify_splitting
"""
def unify_splitting(self, acpp, bcpp, context,
ael, bel, abel, direction):
(acppid, axid, bcppid, bxid) = context
self.unify_size_element(ael, bel, abel, 'FragmentSize')
self.unify_properties(axid, acpp, ael,
bxid, bcpp, bel, abel)
self.unify_simple_subelement(ael, bel, abel,
'cppa', 'CompressionAlgorithm',
intersectifmultiple=False, strictelements=False, required=False)
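        # JoinInterval is a receiver-side setting: it is taken from B when direction
        # is 'send' and from A otherwise.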
if direction == 'send':
splitting_interval_list = bel.xpath('child::cppa:JoinInterval',
namespaces=self.NSMAP)
else:
splitting_interval_list = ael.xpath('child::cppa:JoinInterval',
namespaces=self.NSMAP)
if len(splitting_interval_list) > 0:
abel.append(deepcopy(splitting_interval_list[0]))
"""
self.unify_alternate_channel_id
"""
def unify_alternate_channel_id(self, acpp, bcpp, context,
ael, bel, abel, direction):
(acppid, axid, bcppid, bxid) = context
logging.info('unify_alternate_channel_id for {} {}'.format(context, ael))
aaltchannelid = ael.text
baltchannelid = bel.text
logging.info("Attempting to unify Alternate channels {} and {}".format(
aaltchannelid, baltchannelid)
)
self.unify_channels((acppid, aaltchannelid, bcppid, baltchannelid),
acpp, bcpp, direction)
logging.info("Unified AlternateChannelId {} {}".format(aaltchannelid,
baltchannelid))
abel.text = self.cppaid(acppid, aaltchannelid, bcppid, baltchannelid)
self.record_dependency(context, 'channel', (acppid,
aaltchannelid,
bcppid,
baltchannelid))
"""
Certificates
"""
def unify_signing_cert_and_anchor(self, acppid, acpp, bcppid, bcpp,
acppel, bcppel, security, direction):
if direction == "send":
self.unify_signing_cert_and_anchor_send(acppid, acpp, bcppid, bcpp,
acppel, bcppel, security)
else:
self.unify_signing_cert_and_anchor_send(bcppid, bcpp, acppid, acpp,
bcppel, acppel, security)
def unify_signing_cert_and_anchor_send(self, acppid, acpp, bcppid, bcpp,
acppel, bcppel, security):
a_signing_certL = acppel.xpath('child::cppa:SigningCertificateRef',
namespaces=self.NSMAP)
b_signing_anchorL = bcppel.xpath('child::cppa:SigningTrustAnchorSetRef',
namespaces=self.NSMAP)
required = self.certificate_required(bcppel, 'Signing', False)
if len(a_signing_certL) == 1 and len(b_signing_anchorL) == 1:
signingcertid = a_signing_certL[0].get('certId')
banchorid = b_signing_anchorL[0].get('certId')
logging.info('Checking if cert {} matches anchors {}'.format(signingcertid,
banchorid))
acert = acpp.xpath('cppa:PartyInfo/cppa:Certificate[@id="{}"]'.format(signingcertid),
namespaces=self.NSMAP)[0]
banchor = bcpp.xpath('cppa:PartyInfo/cppa:TrustAnchorSet[@id="{}"]'.format(banchorid),
namespaces=self.NSMAP)[0]
ax509certL = acert.xpath('descendant-or-self::ds:X509Certificate',
namespaces=self.NSMAP)
self.unify_cert_and_anchor(signingcertid, ax509certL, banchorid, banchor, bcpp, required)
elif len(b_signing_anchorL) == 1:
logging.info('A signing anchor specified, but no cert')
security.append(deepcopy(b_signing_anchorL[0]))
if required:
raise UnificationException('A signing certificate is required, but not presented')
elif len(a_signing_certL) == 1:
logging.info('A signing cert specified, but no anchor')
else:
logging.info('No signing anchor and/or cert specified')
def unify_encryption_cert_and_anchor(self, acppid, acpp, bcppid, bcpp,
acppel, bcppel, parent, direction):
if direction == "send":
self.unify_encryption_cert_and_anchor_send(acppid, acpp, bcppid, bcpp,
acppel, bcppel, parent)
else:
self.unify_encryption_cert_and_anchor_send(bcppid, bcpp, acppid, acpp,
bcppel, acppel, parent)
def unify_encryption_cert_and_anchor_send(self, acppid, acpp, bcppid, bcpp,
acppel, bcppel, parent):
a_encryption_anchorL = acppel.xpath('child::cppa:EncryptionTrustAnchorSetRef',
namespaces=self.NSMAP)
b_encryption_certL = bcppel.xpath('child::cppa:EncryptionCertificateRef',
namespaces=self.NSMAP)
required = self.certificate_required(acppel, 'Encryption', True)
if len(a_encryption_anchorL) == 1 and len(b_encryption_certL) == 1:
aanchorid = a_encryption_anchorL[0].get('certId')
encryptioncertid = b_encryption_certL[0].get('certId')
logging.info('Checking if cert {} matches anchors {}'.format(encryptioncertid,
aanchorid))
aanchor = acpp.xpath(
'cppa:PartyInfo/cppa:TrustAnchorSet[@id="{}"]'.format(aanchorid),
namespaces=self.NSMAP)[0]
bcert = bcpp.xpath(
'cppa:PartyInfo/cppa:Certificate[@id="{}"]'.format(encryptioncertid),
namespaces=self.NSMAP)[0]
bx509certl = bcert.xpath('descendant-or-self::ds:X509Certificate',
namespaces=self.NSMAP)
if len(bx509certl) > 0:
bx509rootcert = remove_all_whitespace(bx509certl[-1].text)
logging.debug('Root cert is {} ... {} (len: {})'.format(bx509rootcert[0:6],
bx509rootcert[-6:],
len(bx509rootcert)))
self.unify_cert_and_anchor(encryptioncertid, bx509certl, aanchorid, aanchor, acpp)
elif len(a_encryption_anchorL) == 1:
logging.info('An encryption anchor specified, but no cert')
parent.append(deepcopy(a_encryption_anchorL[0]))
if required:
raise UnificationException('An encryption certificate is required, but not presented')
elif len(b_encryption_certL) == 1:
logging.info('An encryption cert specified, but no anchor')
else:
logging.info('No encryption anchor/cert specified')
def unify_cert_and_anchor(self, signingcertid, ax509certL, banchorid, banchor, bcpp, required=None):
rootfound = False
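        # The last ds:X509Certificate in the certificate's chain is treated as the
        # root; it must match either an AnchorCertificateRef or a Certificate embedded
        # in the counterparty's trust anchor set.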
if len(ax509certL) > 0:
ax509rootcert = remove_all_whitespace(ax509certL[-1].text)
logging.debug('Root cert is {} ... {} (len: {})'.format(ax509rootcert[0:6],
ax509rootcert[-6:],
len(ax509rootcert)))
rootfound = False
for b_anchor_ref in banchor.xpath('cppa:AnchorCertificateRef',
namespaces=self.NSMAP):
b_anchor_certid = b_anchor_ref.get('certId')
if check_x509_data_content(signingcertid,
ax509rootcert,
b_anchor_certid,
bcpp):
rootfound = True
if not rootfound:
for embedded_cert in banchor.xpath('cppa:Certificate',
namespaces=self.NSMAP):
certid = embedded_cert.get('id')
if check_x509_data_content_2(signingcertid,
ax509rootcert,
certid,
embedded_cert,
bcpp):
rootfound = True
else:
logging.warning('Empty ax509certL for {}'.format(signingcertid))
if not rootfound:
raise UnificationException(
'Cert {} does not match a root cert in {}'.format(signingcertid,
banchorid)
)
"""
User Authentication
A username and password are generated as part of CPA formation
"""
def unify_user_authentication(self, acpp, bcpp, context,
ael, bel, abel, direction):
(acppid, aelid, bcppid, belid) = context
usernameel = lxml.etree.SubElement(abel, cppa('Username'))
usernameel.text = create_username(acppid, aelid, bcppid, belid)
passwordel = lxml.etree.SubElement(abel, cppa('Password'))
passwordel.text = create_random_password()
self.unify_boolean_subelement(ael, bel, abel,
'cppa', 'Digest', required=False)
self.unify_boolean_subelement(ael, bel, abel,
'cppa', 'Nonce', required=False)
self.unify_boolean_subelement(ael, bel, abel,
'cppa', 'Created', required=False)
"""
Transport
"""
def unify_transport_elements(self, acppid, acpp, bcppid, bcpp,
a_channel, b_channel, context, binding,
direction):
atid = a_channel.get('transport')
btid = b_channel.get('transport')
if atid is not None and btid is not None:
self.unify_transport(acppid, acpp,
bcppid, bcpp,
atid,
btid,
direction)
abtid = self.cppaid(acppid, atid, bcppid, btid)
binding.set('transport', abtid)
self.record_dependency(context, 'transport', (acppid, atid, bcppid, btid))
elif (atid is None and btid is not None) or (btid is None and atid is not None):
raise UnificationException(
'Element {} and {} inconsistent for transport'.format(a_channel.get('id'),
b_channel.get('id'))
)
def unify_transport(self, acppid, acpp, bcppid, bcpp,
atid, btid, direction):
cached, result = self.memo(acppid,
atid,
bcppid,
btid,
self.unify_transport_results,
self.unify_transport_exceptions)
if cached:
return result
try:
result = self.unify_transport_memo(acppid, acpp, bcppid, bcpp,
atid, btid, direction)
except UnificationException as e:
self.unify_transport_exceptions[acppid, atid, bcppid, btid] = e
raise
else:
self.unify_transport_results[acppid, atid, bcppid, btid] = result
return result
def unify_transport_memo(self, acppid, acpp, bcppid, bcpp,
atid, btid, direction):
try:
if atid is None and btid is None:
logging.info("No transport, OK")
return (acppid, bcppid, atid, btid)
elif atid is None or btid is None:
raise Exception('Missing transport {} or {}'.format(atid, btid))
else:
atransport = acpp.xpath('descendant::node()[@id="{}"]'.format(atid),
namespaces=self.NSMAP)[0]
btransport = bcpp.xpath('descendant::node()[@id="{}"]'.format(btid),
namespaces=self.NSMAP)[0]
if atransport.tag != btransport.tag:
raise UnificationException(
'Mismatch in transport type: {} vs {}'.format(atransport.tag,
btransport.tag))
abtransport = lxml.etree.Element(atransport.tag,
id=self.cppaid(acppid,
atransport.get('id'),
bcppid,
btransport.get('id')),
nsmap=self.NSMAP)
description = lxml.etree.SubElement(abtransport, cppa('Description'))
description.set(xml('lang'),'en')
description.text = 'Transport formed from {} in {} and {} in {}'.format(atid,
acppid,
btid,
bcppid)
self.unify_transport_method(atransport.tag,
atid,
atransport,
btid,
btransport,
abtransport)
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'ClientIPv4',
required=False, strictelements=False,
intersectifmultiple=False)
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'ClientIPv6',
required=False, strictelements=False)
self.unify_ip_versions(atid, atransport, btid, btransport, abtransport)
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'Endpoint',
required=True, strictelements=False)
self.unify_complex_subelement(acpp, bcpp, (acppid, atid, bcppid, btid),
atransport, btransport,
abtransport,
'cppa', 'TransportLayerSecurity',
self.unify_transport_layer_security, direction)
self.unify_complex_subelement(acpp, bcpp, (acppid, atid, bcppid, btid),
atransport, btransport,
abtransport,
'cppa', 'UserAuthentication',
self.unify_user_authentication, direction)
self.unify_complex_subelement(acpp, bcpp, (acppid, atid, bcppid, btid),
atransport, btransport,
abtransport,
'cppa', 'TransportRestart',
self.unify_transport_restart, direction)
# For HTTP
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'HTTPVersion',
required=False, strictelements=False,
intersectifmultiple=True)
self.unify_boolean_subelement(atransport, btransport, abtransport,
'cppa', 'ChunkedTransferCoding',
strictelements=False,
required=False)
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'ContentCoding',
required=False, strictelements=False,
intersectifmultiple=True)
# For SMTP
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'From',
required=False, strictelements=False)
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'To',
required=False, strictelements=False)
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'Subject',
required=False, strictelements=False)
# For WebSocket
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'SubProtocol',
required=False, strictelements=False)
# For AMQP
self.unify_complex_subelement(acpp, bcpp, (acppid, atid, bcppid, btid),
atransport, btransport,
abtransport,
'cppa', 'AMQPSecurity',
self.unify_amqp_security, direction)
# For SFTP
self.unify_complex_subelement(acpp, bcpp, (acppid, atid, bcppid, btid),
atransport, btransport,
abtransport,
'cppa', 'Compression',
self.unify_compression, direction)
self.unify_ssh_keys(acppid, acpp, bcppid, bcpp,
atransport, btransport, abtransport, direction)
self.unify_simple_subelement(atransport, btransport, abtransport,
'cppa', 'SSHCipher',
required=False, strictelements=False,
intersectifmultiple=False)
return abtransport
except UnificationException as e:
raise UnificationException('Transport {} {}: {}'.format(atid, btid, e))
def unify_ip_versions(self, atid, atransport, btid, btransport, abtransport):
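        # Incompatible only if one transport is IPv4-only and the other IPv6-only;
        # otherwise any explicit supportsIPv4/supportsIPv6="false" flag is carried
        # over to the unified transport.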
if ( xsd_boolean(atransport.get('supportsIPv4')) is False \
and xsd_boolean(btransport.get('supportsIPv6')) is False ) \
or \
( xsd_boolean(btransport.get('supportsIPv4')) is False \
and xsd_boolean(atransport.get('supportsIPv6')) is False ):
raise UnificationException(
'Transport {} {} are on incompatible IP versions'.format(
atid, btid
))
else:
for transport in [atransport, btransport]:
for ip_version in ['supportsIPv4', 'supportsIPv6']:
if xsd_boolean(transport.get(ip_version)) is False:
abtransport.set(ip_version, 'false')
def unify_transport_method(self, protocol,
atid, atransport,
btid, btransport,
abtransport):
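        # FTP defaults to method PUT and HTTP to POST. Both sides must agree on the
        # method; it is only kept on the unified transport when it differs from the
        # protocol default.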
for transport in [atransport, btransport]:
if protocol == cppa('FTPTransport'):
if 'method' not in transport.attrib:
transport.set('method', 'PUT')
if protocol == cppa('HTTPTransport'):
if 'method' not in transport.attrib:
transport.set('method', 'POST')
if atransport.get('method') != btransport.get('method'):
raise UnificationException(
'Method mismatch for {} {}: {} vs {}'.format(atid,
btid,
atransport.get('method'),
btransport.get('method'))
)
elif 'method' in atransport.attrib:
abtransport.set('method', atransport.get('method'))
if protocol == cppa('FTPTransport') and abtransport.get('method') == 'PUT':
del abtransport.attrib['method']
elif protocol == cppa('HTTPTransport') and abtransport.get('method') == 'POST':
del abtransport.attrib['method']
def unify_transport_layer_security(self, acpp, bcpp, context,
atls, btls, abtls, direction):
if direction == "send":
self.unify_transport_layer_security_send(acpp, bcpp, context,
atls, btls, abtls)
else:
self.unify_transport_layer_security_send(bcpp, acpp, context,
btls, atls, abtls)
def unify_transport_layer_security_send(self, acpp, bcpp, context,
atls, btls, abtls):
(acppid, axid, bcppid, bxid) = context
logging.info('Unifying TransportLayerSecurity for {} {}'.format(axid,
bxid))
self.unify_boolean_subelement(atls, btls, abtls,
'cppa', 'StartTLS', required=False,
strictelements=True)
self.unify_simple_subelement(atls, btls, abtls,
'cppa', 'TLSProtocol',
required=False, strictelements=False)
self.unify_boolean_subelement(atls, btls, abtls,
'cppa', 'ServerNameIndicationRequired',
required=False, strictelements=True)
self.unify_simple_subelement(atls, btls, abtls,
'cppa', 'CipherSuite',
required=False,
intersectifmultiple=True,
strictelements=False)
self.resolve_certificate_ref(acpp, bcpp, context,
'cppa:ClientCertificateRef',
atls, btls, abtls, 'send', 'send')
self.resolve_certificate_ref(acpp, bcpp, context,
'cppa:ServerCertificateRef',
atls, btls, abtls, 'send', 'receive')
self.unify_tls_server_cert_and_anchor_send(acppid, acpp, bcppid, bcpp,
atls, btls, abtls)
self.unify_tls_client_cert_and_anchor_send(acppid, acpp, bcppid, bcpp,
atls, btls, abtls)
def unify_tls_server_cert_and_anchor_send(self, acppid, acpp, bcppid, bcpp, atls, btls, abtls):
a_server_anchorL = atls.xpath('child::cppa:ServerTrustAnchorSetRef',
namespaces=self.NSMAP)
b_server_certL = btls.xpath('child::cppa:ServerCertificateRef',
namespaces=self.NSMAP)
required = self.certificate_required(atls, 'Server', False)
if len(a_server_anchorL) == 1 and len(b_server_certL) == 1:
aanchorid = a_server_anchorL[0].get('certId')
server_certid = b_server_certL[0].get('certId')
logging.info(
'Checking if cert {} matches anchors {}'.format(
server_certid, aanchorid)
)
aanchor = acpp.xpath('cppa:PartyInfo/cppa:TrustAnchorSet[@id="{}"]'.format(aanchorid),
namespaces=self.NSMAP)[0]
bcert = bcpp.xpath('cppa:PartyInfo/cppa:Certificate[@id="{}"]'.format(server_certid),
namespaces=self.NSMAP)[0]
bx509certL = bcert.xpath('descendant-or-self::ds:X509Certificate',
namespaces=self.NSMAP)
self.unify_cert_and_anchor(server_certid, bx509certL, aanchorid, aanchor, acpp)
elif len(a_server_anchorL) == 1:
logging.info('A server anchor specified, but no cert')
abtls.append(deepcopy(a_server_anchorL[0]))
if required:
raise UnificationException('A server certificate is required, but not presented')
elif len(b_server_certL) == 1:
logging.info('A server cert specified, but no anchor')
else:
            logging.info('No server anchor/cert specified')
def unify_tls_client_cert_and_anchor_send(self, acppid, acpp, bcppid, bcpp,
atls, btls, abtls):
a_client_certL = atls.xpath('child::cppa:ClientCertificateRef',
namespaces=self.NSMAP)
b_client_anchorL = btls.xpath('child::cppa:ClientTrustAnchorSetRef',
namespaces=self.NSMAP)
required = self.certificate_required(btls, 'Client', False)
if len(b_client_anchorL) == 1 and len(a_client_certL) == 1:
banchorid = b_client_anchorL[0].get('certId')
client_certid = a_client_certL[0].get('certId')
logging.info('Checking if cert {} matches anchors {}'.format(client_certid,
banchorid))
acert = acpp.xpath('cppa:PartyInfo/cppa:Certificate[@id="{}"]'.format(client_certid),
namespaces=self.NSMAP)[0]
banchor = bcpp.xpath(
'cppa:PartyInfo/cppa:TrustAnchorSet[@id="{}"]'.format(banchorid),
namespaces=self.NSMAP)[0]
ax509certL = acert.xpath('descendant-or-self::ds:X509Certificate',
namespaces=self.NSMAP)
self.unify_cert_and_anchor(client_certid, ax509certL, banchorid, banchor, bcpp)
elif len(b_client_anchorL) == 1:
            logging.info('A client anchor specified, but no cert')
abtls.append(deepcopy(b_client_anchorL[0]))
if required:
                raise UnificationException('A client certificate is required, but not presented')
elif len(a_client_certL) == 1:
            logging.info('A client cert specified, but no anchor')
else:
            logging.info('No client anchor/cert specified')
def unify_ssh_keys(self, acppid, acpp, bcppid, bcpp,
acppel, bcppel, parent, direction):
if direction == "send":
self.unify_ssh_keys_send(acppid, acpp, bcppid, bcpp,
acppel, bcppel, parent)
else:
self.unify_ssh_keys_send(bcppid, bcpp, acppid, acpp,
bcppel, acppel, parent)
def unify_ssh_keys_send(self, acppid, acpp, bcppid, bcpp,
acppel, bcppel, parent):
a_key_refs = acppel.xpath('child::cppa:SSHClientKeyRef',
namespaces=self.NSMAP)
b_key_refs = bcppel.xpath('child::cppa:SSHServerKeyRef',
namespaces=self.NSMAP)
if len(a_key_refs) > 0:
parent.append( deepcopy(a_key_refs[0]))
if len(b_key_refs) > 0:
parent.append( deepcopy(b_key_refs[0]))
def unify_transport_restart(self, acpp, bcpp, context,
a, b, ab, direction):
self.unify_simple_subelement(a, b, ab,
'cppa', 'RestartProtocol',
required=True, strictelements=True)
if direction == 'send':
receiver_interval_L = b.xpath('child::cppa:RestartInterval',
namespaces=self.NSMAP)
else:
receiver_interval_L = a.xpath('child::cppa:RestartInterval',
namespaces=self.NSMAP)
if len(receiver_interval_L) > 0:
ab.append(deepcopy(receiver_interval_L[0]))
"""
Properties
Unify properties in two action bindings or in AMQP property definitions.
Checks that the two bindings have the same number of properties and that they
match pairwise.
"""
def unify_properties(self, aid, acpp, a_binding,
bid, bcpp, b_binding, actionbinding):
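        # A binding either lists cppa:Property children inline or references a shared
        # cppa:PropertySet through its propertySetId attribute, in which case the
        # properties are resolved from the owning CPP.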
        if a_binding.get('propertySetId') is None and b_binding.get('propertySetId') is None:
a_property_list = a_binding.xpath('child::cppa:Property',
namespaces=self.NSMAP)
b_property_list = b_binding.xpath('child::cppa:Property',
namespaces=self.NSMAP)
b_parent = b_binding
else:
if a_binding.get('propertySetId') != None:
a_property_list = acpp.xpath(
"cppa:PropertySet[@id='{}']/child::cppa:Property".format(
a_binding.get('propertySetId')),
namespaces=self.NSMAP)
else:
a_property_list = a_binding.xpath(
'child::cppa:Property',
namespaces=self.NSMAP)
if b_binding.get('propertySetId') != None:
b_property_list = bcpp.xpath(
"cppa:PropertySet[@id='{}']/child::cppa:Property".format(
b_binding.get('propertySetId')),
namespaces=self.NSMAP)
b_parent = bcpp.xpath(
"cppa:PropertySet[@id='{}']".format(
b_binding.get('propertySetId')),
namespaces=self.NSMAP)[0]
else:
b_property_list = b_binding.xpath('child::cppa:Property',
namespaces=self.NSMAP)
b_parent = b_binding
if len(a_property_list) != len(b_property_list):
raise UnificationException(
'Unequal number of properties for {}, {}'.format(aid, bid))
else:
#xpq = 'child::cppa:Property[@name="{}" and @minOccurs="{}" and @maxOccurs="{}"]'
xpq = 'child::cppa:Property[@name="{}"]'
for aprop in a_property_list:
aname = aprop.get('name')
a_min = aprop.get('minOccurs')
a_max = aprop.get('maxOccurs')
                bpropl = b_parent.xpath(xpq.format(aname),
                                        namespaces=self.NSMAP)
if len(bpropl) == 0:
raise UnificationException(
'Mismatch for property {} in {}, {}'.format(aname,
aid,
bid))
bprop = bpropl[0]
b_min = bprop.get('minOccurs')
b_max = bprop.get('maxOccurs')
for (p1, p2, np) in [
(a_min, b_min, 'minOccurs'),
(a_max, b_max, 'maxOccurs')]:
if p1 != p2:
raise UnificationException(
'Mismatch for {} of property {} in {}, {}'.format(np,
aname,
aid,
bid))
else:
actionbinding.append(deepcopy(aprop))
"""
Payload Profiles
"""
def unify_payload_profile(self, acppid, acpp, bcppid, bcpp,
aid, bid, direction):
logging.info('Unifying payload profiles {} {} and {} {}'.format(acppid,
aid,
bcppid,
bid))
cached, result = self.memo(acppid,
aid,
bcppid,
bid,
self.unify_payload_profile_results,
self.unify_payload_profile_exceptions)
if cached:
return result
try:
result = self.unify_payload_profile_memo(acppid, acpp, bcppid, bcpp, aid, bid, direction)
except UnificationException as e:
self.unify_payload_profile_exceptions[acppid, aid, bcppid, bid] = e
raise
else:
self.unify_payload_profile_results[acppid, aid, bcppid, bid] = result
return result
def unify_payload_profile_memo(self, acppid, acpp, bcppid, bcpp,
aid, bid, direction):
try:
if aid == bid is None:
logging.info("No payload profile, OK")
return None
elif aid is None or bid is None:
raise Exception('Missing payload profile {} or {}'.format(aid, bid))
else:
app = acpp.xpath('descendant::node()[@id="{}"]'.format(aid),
namespaces=self.NSMAP)[0]
bpp = bcpp.xpath('descendant::node()[@id="{}"]'.format(bid),
namespaces=self.NSMAP)[0]
abpp = lxml.etree.Element(app.tag, id=self.cppaid(acppid,
app.get('id'),
bcppid,
bpp.get('id')),
nsmap=self.NSMAP)
self.unify_payload_parts(acpp, bcpp, acppid, aid, app, bcppid, bid, bpp, abpp, direction)
return abpp
except UnificationException as e:
raise UnificationException('Payload Profile {} {}: {}'.format(aid, bid, e))
def unify_payload_parts(self, acpp, bcpp, acppid, appid, app, bcppid, bppid, bpp, abpp, direction):
app_part_list = app.xpath('child::cppa:PayloadPart',
namespaces=self.NSMAP)
bpp_part_list = bpp.xpath('child::cppa:PayloadPart',
namespaces=self.NSMAP)
alen = len(app_part_list)
blen = len(bpp_part_list)
if alen != blen:
raise UnificationException(
'Inconsistent number of payload parts {} {}: {}; {} {}: {}'.format(acppid,
appid,
alen,
bcppid,
bppid,
blen)
)
else:
for c in range(0,alen):
appart = app_part_list[c]
bppart = bpp_part_list[c]
self.unify_payload_part(acpp,
bcpp,
acppid,
appid,
appart,
bcppid,
bppid,
bppart,
c,
abpp,
direction)
def unify_payload_part(self, acpp, bcpp, acppid, appid, appart, bcppid,
bppid, bppart, c, abpp, direction):
abpart = lxml.etree.Element(appart.tag)
unify_cardinality(appart, bppart, abpart, '{} {} {}'.format(appid, bppid, c))
unify_atts(appart, bppart, abpart, False, ['requireSignature',
'requireEncryption'])
self.unify_simple_subelement(appart, bppart, abpart,
'cppa', 'PartName')
self.unify_simple_subelement(appart, bppart, abpart,
'cppa', 'MIMEContentType',
required=False, strictelements=False)
self.unify_simple_subelement(appart, bppart, abpart,
'cppa', 'Schema', required=False,
strictelements=False, intersectifmultiple=True)
self.unify_size_element(appart, bppart, abpart, 'MaxSize')
self.unify_properties(appid, acpp, appart,
bppid, bcpp, bppart, abpart)
self.unify_complex_subelement(acpp, bcpp,
(acppid, appid, bcppid, bppid),
appart,
bppart,
abpart,
'cppa', 'Signature',
self.unify_signature, direction)
self.unify_complex_subelement(acpp, bcpp,
(acppid, appid, bcppid, bppid),
appart,
bppart,
abpart,
'cppa', 'Encryption',
self.unify_encryption, direction)
abpp.append(abpart)
"""
Packaging
"""
def unify_package_elements(self, acppid, acpp, bcppid, bcpp,
a_channel, b_channel,
context,
ebmsbinding, direction):
apid = a_channel.get('package')
bpid = b_channel.get('package')
if apid is not None and bpid is not None:
self.unify_package(acppid, acpp,
bcppid, bcpp,
apid, bpid,
context,
direction)
abpid = self.cppaid(acppid, apid, bcppid, bpid)
ebmsbinding.set('package', abpid)
self.record_dependency(context, 'package', (acppid, apid, bcppid, bpid))
def unify_package(self, acppid, acpp,
bcppid, bcpp,
apid, bpid,
context,
direction):
cached, result = self.memo(acppid,
apid,
bcppid,
bpid,
self.unify_package_results,
self.unify_package_exceptions)
if cached:
return result
try:
result = self.unify_package_memo(acppid, acpp, bcppid, bcpp,
apid, bpid, context, direction)
except UnificationException as e:
self.unify_package_exceptions[acppid, apid, bcppid, bpid] = e
raise
else:
self.unify_package_results[acppid, apid, bcppid, bpid] = result
return result
def unify_package_memo(self, acppid, acpp, bcppid, bcpp,
apid, bpid, context, direction):
try:
if apid == bpid is None:
logging.info("No packaging, OK")
return None
elif apid is None or bpid is None:
raise Exception('Missing package {} or {}'.format(apid, bpid))
else:
logging.info('Attempting to unify packages {} and {}'.format(apid,
bpid))
apackage = acpp.xpath('descendant::node()[@id="{}"]'.format(apid),
namespaces=self.NSMAP)[0]
bpackage = bcpp.xpath('descendant::node()[@id="{}"]'.format(bpid),
namespaces=self.NSMAP)[0]
if apackage.tag != bpackage.tag:
raise UnificationException(
'Incompatible package types {} {}'.format(apackage.tag,
bpackage.tag))
elif apackage.tag not in self.packaging_handlers:
raise UnificationException(
'Unsupported package type {} {}'.format(apackage.tag,
bpackage.tag))
else:
try:
handler = self.packaging_handlers[apackage.tag]
logging.info("Package compatible {} {}".format(apid, bpid))
return handler(acpp, acppid, bcpp, bcppid,
apackage, bpackage, context, direction)
except UnificationException as e:
raise UnificationException(
'Mismatch in package {}: {}'.format(apackage.tag,
e))
except UnificationException as e:
raise UnificationException(
'Transport {} {}: {}'.format(apid, bpid, e.value)
)
def unify_soap_with_attachments_envelope(self, acpp, acppid, bcpp, bcppid,
apackage, bpackage, context, direction):
swael = lxml.etree.Element(apackage.tag, nsmap=self.NSMAP)
self.unify_mime_part_lists(swael, apackage, bpackage, context, acpp, bcpp, direction)
return swael
def unify_simple_soap_envelope(self, acpp, acppid, bcpp, bcppid,
apackage, bpackage, context, direction):
sel = lxml.etree.Element(apackage.tag, nsmap=self.NSMAP)
self.unify_mime_part_lists(sel, apackage, bpackage, context, acpp, bcpp, direction)
return sel
def unify_mime_envelope(self, acpp, acppid, bcpp, bcppid,
apackage, bpackage, context, direction):
sel = lxml.etree.Element(apackage.tag, nsmap=self.NSMAP)
self.unify_mime_part_lists(sel, apackage, bpackage, context, acpp, bcpp, direction)
return sel
def unify_mime_multipart_related(self, apart, bpart, context,
acpp, bcpp, direction):
mimepart = lxml.etree.Element(apart.tag, nsmap=self.NSMAP)
unify_atts(apart, bpart, mimepart, strictatts=True)
self.unify_mime_part_lists(mimepart, apart, bpart, context,
acpp, bcpp, direction)
return mimepart
def unify_simple_mime_part(self, apart, bpart, context,
acpp, bcpp, direction):
mimepart = lxml.etree.Element(apart.tag, nsmap=self.NSMAP)
aname = apart.get('PartName')
bname = bpart.get('PartName')
if aname != bname:
raise UnificationException(
'Incompatible PartName {} vs {}'.format(aname,
bname))
else:
mimepart.set('PartName',aname)
return mimepart
def unify_external_payload(self, apart, bpart, context, acpp, bcpp, direction):
mimepart = lxml.etree.Element(apart.tag, nsmap=self.NSMAP)
aname = apart.get('PartName')
bname = bpart.get('PartName')
if aname != bname:
raise UnificationException(
'Incompatible PartName {} vs {}'.format(aname,
bname))
else:
(acppid, axid, bcppid, bxid) = context
mimepart.set('PartName',aname)
a_ep_ch_id = apart.xpath('child::cppa:ChannelId/text()',
namespaces=self.NSMAP)[0]
b_ep_ch_id = bpart.xpath('child::cppa:ChannelId/text()',
namespaces=self.NSMAP)[0]
transportchannelid = (acppid, a_ep_ch_id, bcppid, b_ep_ch_id)
self.unify_channels(transportchannelid, acpp, bcpp, direction)
self.record_dependency(context, 'channel', transportchannelid)
abchannel = lxml.etree.SubElement(mimepart, cppa('ChannelId'))
abchannel.text = self.cppaid(acppid, a_ep_ch_id, bcppid, b_ep_ch_id)
return mimepart
def unify_mime_part_lists(self, parent, apackage, bpackage, context,
acpp, bcpp, direction):
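        # Compare the two MIME part lists pairwise, ignoring Description and
        # CompressionType children; each pair must have the same element type and is
        # unified by the handler registered for that tag in self.mimepart_handlers.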
apartl = apackage.xpath(
'child::*[local-name()!="Description" and local-name()!="CompressionType" ]'
)
bpartl = bpackage.xpath(
'child::*[local-name()!="Description" and local-name()!="CompressionType"]'
)
alen, blen = len(apartl), len(bpartl)
if alen != blen:
raise UnificationException(
'Mismatch in child count for package: {} {}'.format(alen,
blen)
)
else:
for apart, bpart in zip(apartl, bpartl):
if apart.tag != bpart.tag:
raise UnificationException(
'Mismatch in child type for package: {} {}'.format(alen,
blen)
)
else:
handler = self.mimepart_handlers[apart.tag]
parent.append(handler(apart, bpart, context, acpp, bcpp, direction))
"""
Auxiliary functions
"""
def memo(self, p1, p2, p3, p4, results, exceptions, p5=None, p6=None):
if (p1, p2, p3, p4, p5, p6) in results:
logging.info("Results cache hit for {} {} {} {} {} {}".format(
p1, p2, p3, p4, p5, p6)
)
return True, results[p1, p2, p3, p4, p5, p6]
elif (p1, p2, p3, p4, p5, p6) in exceptions:
logging.info("Exceptions cache hit for {} {} {} {} {} {}".format(
p1, p2, p3, p4, p5, p6)
)
raise exceptions[p1, p2, p3, p4, p5, p6]
else:
return False, None
    def confirm_included(self, componenttype, id):
        if componenttype not in self.included_components:
            self.included_components[componenttype] = []
        if id not in self.included_components[componenttype]:
            self.included_components[componenttype].append(id)
    def record_dependency(self, source, category, target):
        if source not in self.depends_on:
            self.depends_on[source] = {}
        if category not in self.depends_on[source]:
            self.depends_on[source][category] = [target]
logging.info("Dependency {} {} {} created".format(source,
category,
target))
targetlist = self.depends_on[source][category]
if target not in targetlist:
logging.info("Dependency {} {} {} added to list".format(source,
category,
target))
targetlist.append(target)
else:
logging.info("Dependency {} {} {} already on list".format(source,
category,
target))
def unify_simple_subelement(self, ael, bel, abel, childns, childtag,
required=True,
strictelements=True,
strictatts=True,
boolean=False,
intersectifmultiple=False):
"""
strictelements: either both inputs have the subelement or sequence of subelements or none
required: there must be at least one match
intersectifmultiple: if both inputs may have multiple elements, the unification is
their intersection if True; if False, there must be a one-to-one unification of subelement
instances.
strictatts: if one input has an attribute then the other must have it too with same value
boolean: if the value is Boolean
"""
logging.info("Unifying subelement {} for {}".format(childtag, abel.tag))
try:
achildren = ael.xpath('child::{}:{}'.format(childns, childtag),
namespaces=self.NSMAP)
bchildren = bel.xpath('child::{}:{}'.format(childns, childtag),
namespaces=self.NSMAP)
achildcount = len(achildren)
bchildcount = len(bchildren)
at_least_one_shared_child_matches = False
if strictelements and achildcount != bchildcount:
raise UnificationException(
'Child count mismatch for {}: {}, {}'.format(childtag,
achildcount,
bchildcount)
)
if achildcount == 0 and bchildcount == 0 and required:
raise UnificationException(
'Missing child {} {} {}'.format(childtag,
achildcount,
bchildcount)
)
elif achildcount == 0 and bchildcount > 0:
for bchild in bchildren:
abchild = lxml.etree.Element(ns(self.NSMAP[childns], childtag),
nsmap=self.NSMAP)
abchild.text = bchild.text
copy_atts(bchild,abchild)
abel.append(abchild)
#at_least_one_shared_child_matches = True
elif achildcount > 0 and bchildcount == 0:
for achild in achildren:
abchild = lxml.etree.Element(ns(self.NSMAP[childns], childtag),
nsmap=self.NSMAP)
abchild.text = achild.text
copy_atts(achild,abchild)
abel.append(abchild)
#at_least_one_shared_child_matches = True
elif achildcount == 1 and bchildcount == 1:
abchild = lxml.etree.Element(ns(self.NSMAP[childns], childtag),
nsmap=self.NSMAP)
abchild.text = unify_boolean_or_text(achildren[0],
bchildren[0],
boolean)
unify_atts(achildren[0], bchildren[0],abchild,
strictatts=strictatts)
abel.append(abchild)
#at_least_one_shared_child_matches = True
elif achildcount >= 1 and bchildcount >= 1:
for counter, achild in enumerate(achildren, 1):
bchildmatchfound = False
for bchild in bchildren:
try:
abchild = lxml.etree.Element(ns(self.NSMAP[childns], childtag),
nsmap=self.NSMAP)
abchild.text = unify_boolean_or_text(achild,
bchild,
boolean)
unify_atts(achild, bchild, abchild,
strictatts=strictatts)
except UnificationException as e:
logging.info(
"Skipping non-matching {} #{}, suppressing {}".format(
achild.tag,
counter,
e.value)
)
else:
logging.info("Matched {} #{}".format(achild.tag, counter))
abel.append(abchild)
bchildmatchfound = True
at_least_one_shared_child_matches = True
break
if intersectifmultiple == False \
and at_least_one_shared_child_matches == True:
break
# we're here if we did not find a match for achild
if strictelements and not bchildmatchfound:
raise UnificationException(
'Missing child for {} {}'.format(
childtag,
counter)
)
if not at_least_one_shared_child_matches:
raise UnificationException(
'Empty intersection for {}'.format(childtag))
elif required and not at_least_one_shared_child_matches:
raise UnificationException(
'Missing match for {} {} {}'.format(childtag,
achildcount,
bchildcount))
except UnificationException as e:
logging.info("Subelements incompatible for {}: {}".format(childtag, e.value))
raise
else:
logging.info("Subelements compatible for {}".format(childtag))
def unify_boolean_subelement(self, ael, bel, abel, childns, childtag,
required=True, strictelements=True, strictatts=True):
return self.unify_simple_subelement(ael, bel, abel, childns, childtag,
required=required,
strictelements=strictelements,
strictatts=strictatts,
boolean=True)
def unify_complex_subelement(self,
acpp, bcpp, idtuple,
ael, bel, abel, childns, childtag,
handler,
direction=None):
logging.info("Unifying subelement {} for {} ({})".format(childtag, ael.tag, direction))
try:
aelements = ael.xpath('child::{}:{}'.format(childns, childtag),
namespaces=self.NSMAP)
belements = bel.xpath('child::{}:{}'.format(childns, childtag),
namespaces=self.NSMAP)
aelementcount = len(aelements)
belementcount = len(belements)
if aelementcount != belementcount:
raise UnificationException(
'Mismatch in count for child(ren) {}, {} vs {}'.format(childtag,
aelementcount,
belementcount)
)
elif aelementcount == 1 and belementcount == 1:
logging.info('Creating {} element, invoking {}'.format(
childtag, handler.func_name)
)
abchild = lxml.etree.SubElement(abel,ns(self.NSMAP[childns], childtag),
nsmap=self.NSMAP)
handler(acpp, bcpp, idtuple, aelements[0], belements[0], abchild, direction)
unify_atts(aelements[0], belements[0],abchild, strictatts=False)
elif aelementcount == belementcount == 0:
logging.info('Element {} not present'.format(childtag))
except UnificationException as e:
raise e
else:
logging.info("Subelements compatible for {}".format(childtag))
def cppaid(self, acppid, acid, bcppid, bcid):
if (acppid, acid, bcppid, bcid) in self.shortened:
return self.shortened[acppid, acid, bcppid, bcid]
else:
m = hashlib.sha224()
m.update('{}_{}_{}_{}'.format(acppid,acid, bcppid, bcid))
longvalue = '_'+base64.b32encode(m.digest())
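            # Try progressively longer prefixes of the base32-encoded SHA-224 digest
            # until a collision-free identifier is found; the leading '_' presumably
            # keeps the result a valid XML ID even when the digest starts with a digit.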
for i in range(5, 50):
short = str(longvalue)[:i]
if short not in self.collisions:
self.shortened[acppid, acid, bcppid, bcid] = short
self.collisions[short] = (acppid, acid, bcppid, bcid)
return short
else:
logging.error('Collision {} for {} {} {} {}'.format(short,
acppid,
acid,
bcppid,
bcid))
(a, b, c, d) = self.collisions[short]
logging.error('Previous value for {} was {} {} {} {}'.format(short,
a,
b,
c,
d))
def inline_channel_features(self, cpp):
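        # Replace by-reference feature attributes (securityBinding, errorHandling, etc.)
        # with inline copies of the elements they point to, drop the now-redundant
        # attribute and the copied element's id, and re-order the result so it stays
        # schema-valid.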
for feature_att in [
'securityBinding',
'reliableMessagingBinding',
'errorHandling',
'receiptHandling',
'addressing',
'compression',
'splitting',
'bundling'
]:
for element in cpp.xpath('//node()[@{}]'.format(feature_att),
namespaces= self.NSMAP):
logging.info(element)
binding_element = element.tag
binding_id = element.get(feature_att)
logging.info('Inlining feature with id {}'.format(binding_id))
referenced_node = cpp.xpath('//node()[@id="{}"]'.format(binding_id ),
namespaces= self.NSMAP)[0]
copied_node = deepcopy(referenced_node)
del element.attrib[feature_att]
del copied_node.attrib['id']
element.append(copied_node)
logging.info('Inlined {} with id {}'.format(binding_element,
binding_id))
return schema.ensure_ordered(cpp)
def certificate_required(self, element, certificatetype, default=False):
certtype_required_list = element.xpath('child::cppa:{}CertificateRequired'.format(certificatetype),
namespaces=self.NSMAP)
if len(certtype_required_list) > 0:
return xsd_boolean(certtype_required_list[0].text)
else:
return default
def cpp_level_acl_check(acpp, bcpp):
a_allowed_party_list_id = acpp.get('allowed')
b_allowed_party_list_id = bcpp.get('allowed')
a_denied_party_list_id = acpp.get('denied')
b_denied_party_list_id = bcpp.get('denied')
acl_check(a_allowed_party_list_id, a_denied_party_list_id, acpp,
b_allowed_party_list_id, b_denied_party_list_id, bcpp)
def service_specification_acl_check(a_service_spec, acpp,
b_service_spec, bcpp):
a_allowed_party_list_id = acpp.get('allowed')
b_allowed_party_list_id = bcpp.get('allowed')
a_denied_party_list_id = acpp.get('denied')
b_denied_party_list_id = bcpp.get('denied')
acl_check(a_allowed_party_list_id, a_denied_party_list_id, acpp,
b_allowed_party_list_id, b_denied_party_list_id, bcpp)
def service_binding_acl_check(a_servicebinding, acpp, b_servicebinding, bcpp):
logging.info('Checking ACLs for {}, {}'.format(
a_servicebinding.xpath(
'descendant::cppa:Description/text()',
namespaces=_NSMAP
),
b_servicebinding.xpath(
'descendant::cppa:Description/text()',
namespaces=_NSMAP
))
)
a_allowed_party_list_id = a_servicebinding.get('allowed')
b_allowed_party_list_id = b_servicebinding.get('allowed')
a_denied_party_list_id = a_servicebinding.get('denied')
b_denied_party_list_id = b_servicebinding.get('denied')
acl_check(a_allowed_party_list_id, a_denied_party_list_id, acpp,
b_allowed_party_list_id, b_denied_party_list_id, bcpp)
def action_binding_acl_check(a_actionbinding, acpp,
b_actionbinding, bcpp):
a_allowed_party_list_id = a_actionbinding.get('allowed')
b_allowed_party_list_id = b_actionbinding.get('allowed')
a_denied_party_list_id = a_actionbinding.get('denied')
b_denied_party_list_id = b_actionbinding.get('denied')
acl_check(a_allowed_party_list_id, a_denied_party_list_id, acpp,
b_allowed_party_list_id, b_denied_party_list_id, bcpp)
def acl_check(a_allowed_party_list_id, a_denied_party_list_id, acpp,
b_allowed_party_list_id, b_denied_party_list_id, bcpp):
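    # Allow lists are checked in both directions: if one side publishes an allow list,
    # every party identifier the other side declares must appear on it. Deny lists are
    # the mirror image: none of the other side's identifiers may appear on them.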
a_parties = deferenced_party_ids(acpp)
b_parties = deferenced_party_ids(bcpp)
if a_allowed_party_list_id == None:
if b_allowed_party_list_id == None:
pass
else:
b_allowed_parties = lookup_party_identifiers(bcpp,
b_allowed_party_list_id, [])
acl_allow_match(b_allowed_parties, a_parties)
else:
a_allowed_parties = lookup_party_identifiers(acpp,
a_allowed_party_list_id, [])
acl_allow_match(a_allowed_parties, b_parties)
if b_allowed_party_list_id == None:
pass
else:
b_allowed_parties = lookup_party_identifiers(bcpp,
b_allowed_party_list_id)
acl_allow_match(b_allowed_parties, a_parties)
if a_denied_party_list_id == None:
if b_denied_party_list_id == None:
pass
else:
b_denied_parties = lookup_party_identifiers(bcpp,
b_denied_party_list_id, [])
acl_deny_match(b_denied_parties, a_parties)
else:
a_denied_parties = lookup_party_identifiers(acpp,
a_denied_party_list_id, [])
b_parties = deferenced_party_ids(bcpp)
acl_deny_match(a_denied_parties, b_parties)
if b_denied_party_list_id == None:
pass
else:
b_denied_parties = lookup_party_identifiers(bcpp,
b_denied_party_list_id)
a_parties = deferenced_party_ids(acpp)
acl_deny_match(b_denied_parties, a_parties)
def lookup_party_identifiers(cpp, id, parties=None):
    # Recursively collect (id, type) pairs, following PartyIdListRef references.
    if parties is None:
        parties = []
    party_id_list = cpp.xpath('child::cppa:PartyIdList[@id="{}"]'.format(id),
                              namespaces=_NSMAP)[0]
    logging.debug('Found list with id {}'.format(id))
    for party_id in party_id_list.xpath('child::cppa:PartyId',
                                        namespaces=_NSMAP):
        pid = party_id.text
        pidtype = party_id.get('type')
        if (pid, pidtype) not in parties:
            parties.append((pid, pidtype))
    for listref in party_id_list.xpath('child::cppa:PartyIdListRef/@href',
                                       namespaces=_NSMAP):
        parties = lookup_party_identifiers(cpp, listref, parties)
    return parties
def deferenced_party_ids(cpp):
parties = []
for party_id in cpp.xpath('descendant::cppa:PartyId',
namespaces=_NSMAP):
pid = party_id.text
pidtype = party_id.get('type')
if (pid, pidtype) not in parties:
parties.append((pid,pidtype))
return parties
def acl_allow_match(allowed_party_ids, party_ids):
for (pid, ptype) in party_ids:
if (pid, ptype) not in allowed_party_ids:
raise UnificationException(
'{}, {} not in allowed list {}'.format(pid,
ptype,
allowed_party_ids))
else:
logging.debug('{}, {} found in allowed party list'.format(pid,
ptype))
def acl_deny_match(denied_party_ids, party_ids):
for (pid, ptype) in party_ids:
if (pid, ptype) in denied_party_ids:
raise UnificationException(
'{}, {} in denied list {}'.format(pid,
ptype,
denied_party_ids))
else:
logging.debug('{}, {} not in denied party list'.format(pid,
ptype))
def check_x509_data_content(anchorid, rootcert, anchor_certid, cpp):
anchor_cert = cpp.xpath(
'descendant::cppa:Certificate[@id="{}"]'.format(anchor_certid),
namespaces=_NSMAP)[0]
return check_x509_data_content_2(anchorid, rootcert, anchor_certid, anchor_cert, cpp)
def check_x509_data_content_2(anchorid, rootcert, anchor_certid, anchor_cert, cpp):
anchor_cert_data = anchor_cert.xpath(
'descendant::ds:X509Certificate/text()',
namespaces=_NSMAP)[0]
anchor_cert_data = remove_all_whitespace(anchor_cert_data)
logging.debug(
'Comparing against {} {} ... {} (len: {})'.format(anchor_certid,
anchor_cert_data[0:6],
anchor_cert_data[-6:],
len(anchor_cert_data)))
if str(rootcert) == str(anchor_cert_data):
logging.info(
'Referenced X509Certificate found in anchor {} cert {}'.format(anchorid,
anchor_certid))
return True
else:
return False
def unify_boolean_or_text(el1, el2, boolean):
if boolean:
return unify_boolean(el1, el2)
else:
return unify_text(el1, el2)
def unify_text(e1, e2):
if e1.text == e2.text:
return e1.text
else:
raise UnificationException('{}: {} vs {}'.format(e1.tag, e1.text, e2.text))
def unify_boolean(e1, e2):
    if e1.text in ('true', '1') and e2.text in ('true', '1'):
        return 'true'
    elif e1.text in ('false', '0') and e2.text in ('false', '0'):
        return 'false'
    else:
        raise UnificationException('Boolean {}: {} vs {}'.format(e1.tag, e1.text, e2.text))
def unify_atts(ael, bel, abel, strictatts=True, atts_to_match = None):
for (aside, bside) in [(ael, bel), (bel, ael)]:
for att in aside.attrib:
if att not in ['id', 'propertySetId']:
if atts_to_match != None and att in atts_to_match:
if aside.attrib[att] == bside.attrib[att]:
abel.set(att, aside.attrib[att])
else:
raise UnificationException(
'Attribute {} value mismatch: {} vs {}'.format(att,
aside.attrib[att],
bside.attrib[att]))
elif atts_to_match == None and att in bside.attrib:
if aside.attrib[att] == bside.attrib[att]:
abel.set(att, aside.attrib[att])
else:
raise UnificationException(
'Attribute {} value mismatch: {} vs {}'.format(att,
aside.attrib[att],
bside.attrib[att]))
elif strictatts:
# @@@ not covered yet
raise UnificationException(
'Attribute {} missing value in one of the inputs'.format(att))
else:
abel.set(att, aside.attrib[att])
def copy_atts(source, target):
for att in source.attrib:
target.set(att, source.get(att))
def unify_att(e1, e2, att):
if not e1.attrib[att] == e2.attrib[att]:
raise UnificationException('{}/@{}: {} vs {}'.format(e1.tag,
att,
e1.attrib[att],
e2.attrib[att]))
else:
return e1.attrib[att]
def unify_and_set_att(el1, el2, el3, att):
if el1.get(att) == el2.get(att):
if el1.get(att) != None:
el3.set(att, el1.get(att))
else:
raise UnificationException('{}/@{}: {} vs {}'.format(el1.tag,
att,
el1.get(att),
el2.get(att)))
def unify_cardinality(aelement, belement, abelement, context=''):
    logging.info('Cardinality check for {}'.format(context))
    for att in ['minOccurs', 'maxOccurs']:
        avalue = aelement.get(att)
        bvalue = belement.get(att)
        if avalue != bvalue:
            raise UnificationException(
                'Incompatible {} cardinality in {}: {} vs {}'.format(att,
                                                                     context,
                                                                     avalue,
                                                                     bvalue))
        elif avalue is not None:
            abelement.set(att, avalue)
def reverse(direction):
if direction == 'send':
return 'receive'
else:
return 'send'
def cppa(el):
return '{{{}}}{}'.format(_NSMAP['cppa'],el)
def xml(el):
return '{{{}}}{}'.format(_NSMAP['xml'],el)
def ns(ns,el):
return '{{{}}}{}'.format(ns,el)
def get_description_value_if_present(el):
descriptions = el.xpath('child::cppa:Description',
namespaces=_NSMAP)
if len(descriptions)>0:
return ' ('+(descriptions[0]).text+')'
else:
return ''
def create_username(acppid, aelid, bcppid, belid, len=15):
m = hashlib.sha224()
m.update('{}_{}_{}_{}'.format(acppid, aelid, bcppid, belid))
longvalue = base64.b64encode(m.digest())
return str(longvalue)[:len]
def create_random_password(len=20):
return str(uuid.uuid4())[:len]
def remove_all_whitespace(inputstring):
pattern = re.compile(r'\s+')
return re.sub(pattern, '', inputstring)
def xsd_boolean(value):
if value == '1':
return True
elif value == 'true':
return True
elif value == '0':
return False
elif value == 'false':
return False
else:
return None
def delegated_party_params(delegation):
delegated_party = delegation.xpath('child::cppa:PartyId',
namespaces=_NSMAP)[0]
delegated_party_cpp_list = delegation.xpath('child::cppa:ProfileIdentifier',
namespaces=_NSMAP)
if len(delegated_party_cpp_list) > 0:
delegated_party_cpp_id = delegated_party_cpp_list[0].text
else:
delegated_party_cpp_id = None
return delegated_party.text, delegated_party.get('type'), delegated_party_cpp_id
def prefix_identifiers(cpp, prefix=''):
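    # Namespace all channel and key identifiers in a CPP with the given prefix:
    # first the elements that reference a channel by text content, then the id
    # attributes of the channel definitions themselves, and finally keyId references.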
for xpexpr in [
'descendant::cppa:ChannelId',
'descendant::cppa:RequestChannelID',
'descendant::cppa:ReceiptHandling/cppa:ReceiptChannelId',
'descendant::cppa:ErrorHandling/cppa:SenderErrorsReportChannelId',
'descendant::cppa:ErrorHandling/cppa:ReceiverErrorsReportChannelId',
'descendant::cppa:PullHandling/cppa:PullChannelId',
'descendant::cppa:AlternateChannel',
]:
for item in cpp.xpath(xpexpr,
namespaces = _NSMAP):
item.text = prefix+item.text
for xpexpr in [
'descendant::cppa:NamedChannel[@id]',
'descendant::cppa:DelegationChannel[@id]',
'descendant::cppa:WSChannel[@id]',
'descendant::cppa:TransportChannel[@id]',
'descendant::cppa:ebMS2Channel[@id]',
'descendant::cppa:ebMS3Channel[@id]',
'descendant::cppa:AS1Channel[@id]',
'descendant::cppa:AS2Channel[@id]',
'descendant::cppa:AS3Channel[@id]',
'descendant::cppa:AMQPChannel[@id]',
'descendant::cppa:SSHKey[@id]'
]:
for item in cpp.xpath(xpexpr,
namespaces = _NSMAP):
item.set('id', prefix+item.get('id'))
for xpexpr in [
'descendant::cppa:SSHClientKeyRef[@keyId]',
'descendant::cppa:SSHServerKeyRef[@keyId]'
]:
for item in cpp.xpath(xpexpr,
namespaces = _NSMAP):
item.set('keyId', prefix+item.get('keyId'))
def _identity_transform(input):
    logging.debug('Applying identity transform ..')
    return input
def _apply_units(xpathresult):
    if len(xpathresult) == 0:
        return 0
    element = xpathresult[0]
    value = float(element.text)
    unit = element.get('unit')
    if unit is None:
        return int(value)
    # Decimal SI prefix multipliers (da = 10, h = 100, k = 1000, ...).
    exponents = {'da': 1, 'h': 2, 'k': 3, 'M': 6, 'G': 9,
                 'T': 12, 'P': 15, 'E': 18, 'Z': 21, 'Y': 24}
    if unit in exponents:
        return int(value * pow(10, exponents[unit]))
def _profileinfo(cpp):
return cpp.xpath('child::cppa:ProfileInfo',
namespaces=_NSMAP)[0]
|
StarcoderdataPython
|
3505865
|
#!/usr/bin/env python
import sys,os,codecs
import xml.etree.ElementTree as ET
from Transducer import transduce,chinese,baptist,decompose
def main():
# for line in open('lahu_writing.txt','rU'):
# for word in line.split():
# print word,
# print transduce(word,decompose),
# print transduce(transduce(word,decompose),chinese),
# print transduce(transduce(word,decompose),baptist)
f = open('4testLahuTexts.xml','rt')
tree = ET.parse(f)
f.close()
for node in tree.findall('.//word/words/word/item'):
if node.attrib['type'] == 'txt':
form = codecs.encode(node.text,'utf8')
print form,
print [form],
print transduce(form,decompose),
print transduce(transduce(form,decompose),chinese),
print transduce(transduce(form,decompose),baptist)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
164487
|
from famille.models import has_user_related, get_user_related
from famille.utils import get_context
def related_user(request):
"""
A context processor that returns the related user from a request.user.
:param request: the request to be processed
"""
if not has_user_related(request.user):
return {}
return {"related_user": get_user_related(request.user)}
def base(request):
"""
Providing base variables.
:param request: the request to be processed
"""
return get_context()
|
StarcoderdataPython
|
5094605
|
import math
from scipy.spatial import ConvexHull
from pyray.shapes.oned.circle import *
from pyray.axes import *
from pyray.shapes.twod.plane import *
from pyray.shapes.twod.functional import *
def paraboloid_circles_rotatingplane(basepath='.\\', scale=200, shift=np.array([1000,1000,0])):
im_ind = 0
for i in (np.concatenate((np.arange(0.5,1,0.01), np.arange(1,3,0.05),np.arange(3,10,0.6)),axis=0) + 1e-4): #Controls the rotation of the plane.
r2 = general_rotation(np.array([.3, .3, .3]), np.pi/i)
r1 = np.eye(4)
orthogonal_vec = np.dot(r2, np.array([0,1,0]))
orthogonal_vec = orthogonal_vec/sum(orthogonal_vec**2) # Should be unnecessary since rotation doesn't change magnitude.
for j in range(4,5): # Controls the rotation of the paraboloid.
r = rotation(3, 2*np.pi*j/30.0)
r1[:3,:3] = r
im = Image.new("RGB", (2048, 2048), "black")
draw = ImageDraw.Draw(im, 'RGBA')
translate = np.array([0,0,1.5])
rotated_xz_plane(draw, r, r2, scale, shift, translate)
render_scene_4d_axis(draw, r1, 4)
for z in np.arange(0.001, 3.5, 0.01):
#generalized_circle(draw, np.array([0,0,z]), np.array([0,0,1]), np.sqrt(z), r, rgba = (255,20,147,50))
#generalized_arc(draw, r, np.array([0,0,z]), np.array([0,0,1]), np.array([np.sqrt(z),0,z]), np.sqrt(z), 0.5, (255,20,147,50))
#generalized_arc(draw, r, np.array([0,0,z]), np.array([0,0,1]), np.array([-np.sqrt(z),0,z]), np.sqrt(z), 0.5, (255,20,147,10))
pt1 = np.dot(r, np.array([-np.sqrt(z),0,z]))
theta = np.pi * 2.0 / 180.0
rot = general_rotation(np.dot(r,np.array([0,0,1])),theta)
for j in range(0,180):
pt2 = np.dot(rot, pt1)
pt2Orig = np.dot(np.transpose(r),pt2)
if sum(pt2Orig * orthogonal_vec) - 1.5*orthogonal_vec[2] > 0:
draw.line((pt1[0]*scale + shift[0], pt1[1]*scale+shift[1], pt2[0]*scale+shift[0], pt2[1]*scale+shift[1]),\
fill=(255,20,147,100), width=5)
else:
draw.line((pt1[0]*scale + shift[0], pt1[1]*scale+shift[1], pt2[0]*scale+shift[0], pt2[1]*scale+shift[1]),\
fill=(255,20,147,40), width=5)
pt1 = pt2
three_d_parabola(draw, r, r2)
im.save(basepath + 'im' + str(im_ind) + '.png')
im_ind = im_ind + 1
def paraboloid_circles(basepath='.\\', scale=200, shift=np.array([1000,1000,0])):
r2=general_rotation(np.array([0,0,1]),np.pi/2)
orthogonal_vec = np.dot(r2, np.array([0,1,0]))
r = rotation(3, 2*np.pi*4/30.0)
r1 = np.eye(4)
r1[:3,:3] = r
im = Image.new("RGB", (2048, 2048), "black")
draw = ImageDraw.Draw(im, 'RGBA')
translate = np.array([0,1.5,0])
#rotated_xz_plane(draw, r, r2, scale, shift, translate=translate)
render_scene_4d_axis(draw, r1, 4)
for z in np.arange(0.001, 3.5, 0.01):
generalized_arc(draw, r, np.array([0,0,z]), np.array([0,0,1]), np.array([np.sqrt(z),0,z]),
np.sqrt(z), 0.5, (255,20,147,50))
generalized_arc(draw, r, np.array([0,0,z]), np.array([0,0,1]), np.array([-np.sqrt(z),0,z]),
np.sqrt(z), 0.5, (255,20,147,10))
three_d_parabola(draw, r, r2)
im.save(basepath + 'im' + str(0) + '.png')
def paraboloid_dirty(im_ind=0, scale=200, shift=np.array([1000,1000,0]), opacity=60,
basepath='.\\'):
#i=1
#r2 = general_rotation(np.array([.3, .3, .3]), np.pi/i)
r1 = np.eye(4)
rot = general_rotation(np.array([0,0,1]), np.pi/20.0 * (8 + im_ind/3.0))
j=4
#r = rotation(3, 2 * np.pi* j /30.0)
r=np.eye(3)
rr = general_rotation(np.array([0,1,0]), np.pi/20.0 * (im_ind/7.0))
r = np.dot(r,rr)
r = np.dot(r, rot)
r1[:3,:3] = r
im = Image.new("RGB", (2048, 2048), "black")
draw = ImageDraw.Draw(im, 'RGBA')
#translate = np.array([0, 0, 1.5])
render_scene_4d_axis(draw, r1, 4, scale, shift)
for z in np.arange(0.001, 3.5, 0.02):
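        # For z > 1 the horizontal circle x^2 + y^2 = z crosses the plane x = 1 that is
        # drawn further down; 2*arccos(1/sqrt(z)) is the angle of the portion beyond
        # that plane, so the two arcs below are trimmed to meet it.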
if z<=1:
prcnt1=0.0; point1 = np.array([np.sqrt(z),0,z])
prcnt2=1.0; point2 = np.array([-np.sqrt(z),0,z])
else:
angle=2*np.arccos(1/np.sqrt(z))
prcnt1=-angle/2/np.pi; point1 = np.array([1,np.sqrt(z-1),z])
prcnt2=-1+prcnt1; point2 = np.array([-np.sqrt(z-1),1,z])
generalized_arc(draw, r, center=np.array([0,0,z]), vec=np.array([0,0,1]),
point=point1,
radius=np.sqrt(z), prcnt=prcnt1, rgba=(255,20,147,50))
generalized_arc(draw, r, np.array([0,0,z]), np.array([0,0,1]), point2,
np.sqrt(z), prcnt2, (255,20,147,10))
## Highlight axes
xax1=np.array([-100.0,0,0.0]);xax1=np.dot(r,xax1)*scale+shift
xax2=np.array([100.0,0,0.0]);xax2=np.dot(r,xax2)*scale+shift
draw.line((xax1[0], xax1[1], xax2[0], xax2[1]), fill=(235,255,0), width=4)
xax1=np.array([0.0,-100,0.0]);xax1=np.dot(r,xax1)*scale+shift
xax2=np.array([0.0,100,0.0]);xax2=np.dot(r,xax2)*scale+shift
draw.line((xax1[0], xax1[1], xax2[0], xax2[1]), fill=(235,255,0), width=4)
xzgradients(draw, r, 1.0) # draws the arrows correponding to gradients.
## Draw the plane.
pt1 = np.array([1.0,-1.2,0]); pt2 = np.array([1.0,1.2,0])
z = 1.2**2+1
pt3 = np.array([1.0,-1.2,z]); pt4 = np.array([1.0,1.2,z])
pt1 = np.dot(r,pt1)*scale+shift; pt2 = np.dot(r,pt2)*scale+shift
pt3 = np.dot(r,pt3)*scale+shift; pt4 = np.dot(r,pt4)*scale+shift
draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt4[0], pt4[1]), (pt3[0], pt3[1])],\
(0,102,255,150))
pt = np.array([1.0,0,1.0]);pt=np.dot(r,pt)*scale+shift
pt_z = np.array([0.0,0,1.0]); pt_z=np.dot(r,pt_z)*scale+shift
draw.line((pt[0], pt[1], pt_z[0], pt_z[1]), fill=(255,0,120), width=4)
pt_y = np.array([1.0,0,0.0]); pt_y=np.dot(r,pt_y)*scale+shift
#draw.line((pt[0], pt[1], pt_y[0], pt_y[1]), fill=(255,0,120), width=4)
draw.ellipse((pt[0]-10, pt[1]-10, pt[0]+10, pt[1]+10), fill = (0,255,0))
pt = shift
draw.ellipse((pt[0]-10, pt[1]-10, pt[0]+10, pt[1]+10), fill = (255,255,0))
im.save(basepath + 'im' + str(im_ind) + '.png')
def xzgradients(draw, r, y):
for x in [-1.2,-0.7,-0.4,0.0,0.4,0.7,1.2]:
z = x*x + y*y
#draw_points(draw, r, y, x)
#arrowV1(draw,r,np.array([y,x,z]), np.array([2.5*y,2.5*x,z]), (204,102,255))
arrowV1(draw,r,np.array([y,x,z]), np.array([2.5*y,2.5*x,z]), (255,20,147))
#arrowV1(draw,r,np.array([y,x,z]), np.array([y+1.0,x,z]), (0,102,255))
arrowV1(draw,r,np.array([y,x,z]), np.array([y-1.0,x,z]), (0,102,255))
def paraboloidTangent(draw, r, x1, y1, d = 1.0, rgba = (120,80,200,150), scale = 200,
shift = np.array([1000,1000,0])):
'''
Draws a tangent plane to a paraboloid: x^2+y^2 = z at point given by coordinates (x1, y1)
'''
x2 = x1-d
y2 = y1+d
pt1 = np.dot(r, np.array([x2, y2, z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
x2 = x1+d
y2 = y1+d
pt2 = np.dot(r, np.array([x2, y2, z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
x2 = x1+d
y2 = y1-d
pt3 = np.dot(r, np.array([x2, y2, z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
x2 = x1-d
y2 = y1-d
pt4 = np.dot(r, np.array([x2, y2, z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt3[0], pt3[1]), (pt4[0], pt4[1])], rgba)
def paraboloid(x, y, coeff=0.1, intercept=0.1):
    '''
    Height of the paraboloid z = coeff*(x^2 + y^2) - intercept at the point (x, y).
    '''
    return coeff*(x**2 + y**2) - intercept
def paraboloid_intersection(draw, r, x1, y1, coeff, intercept,
shift=np.array([1000.0, 1000.0, 0.0]), scale=200.0,
rgba=(250,250,20), start_line=-12, extend=1.7, width=10):
'''
Draws the intersection arc of two paraboloids.
args:
extend: The amount by which the arc is to extend from its starting point. This parameter was tuned by hit and trial.
'''
def parametrized_pt(t, x1, y1, coeff, intercept):
'''
See 180225
'''
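        # The paraboloid z = c*(x^2 + y^2) - i and its copy centred at (x1, y1)
        # intersect above the straight line 2*x1*x + 2*y1*y = x1^2 + y1^2 in the
        # xy-plane; taking x = t gives the y below, and z follows from either surface.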
x = t
y = (x1**2 + y1**2 - 2*t*x1)/(2*y1)
z = coeff*(x**2+y**2) - intercept
return np.array([x, y, z])
t = start_line
pt1 = np.dot(r, parametrized_pt(t, x1, y1, coeff, intercept)) * scale + shift[:3]
#for i in range(1, int_line):
while t <= abs(start_line)*extend:
t += 1/10.0
pt2 = np.dot(r, parametrized_pt(t, x1, y1, coeff, intercept)) * scale + shift[:3]
draw.line((pt1[0], pt1[1], pt2[0], pt2[1]), fill=rgba, width=width)
pt1 = pt2
def draw_paraboloids(scale=12.5, basedir=".\\"):
'''
Draws a pair of flapping paraboloids.
args:
scale: How big should the paraboloids be relative to the image.
basedir:The directory where the images are to be saved.
In the main pyray repo, basedir is ..\\images\\RotatingCube\\
'''
sep = 8
base_coeff = 0.01
start_line = -12
r1 = np.eye(4)
for j in range(21,22):
r = rotation(3, np.pi/30*j)
r1[:3,:3] = r
for i1 in range(20):
i = 2.5*np.sin(i1*np.pi/10.0)
im = Image.new("RGB", (2048, 2048), (1, 1, 1))
draw = ImageDraw.Draw(im, 'RGBA')
render_scene_4d_axis(draw, r1, 4)
fn = lambda x, y : paraboloid(x, y, coeff=i*base_coeff, intercept=i)
cfn = lambda x, y : 0.0
#drawFunctionalXYGrid(draw, r, scale=50, fn=cfn, extent=15)
drawXYGrid(draw, r, meshLen=1.0)
drawFunctionalXYGrid(draw, r, scale=scale, fn=fn, extent=20, rgba2=(0,255,0,75),
saperatingPlane=np.array([-1,-1,sep]))
shift1 = np.dot(r, np.array([sep,sep,0]))*scale + np.array([1000.0, 1000.0, 0])
drawFunctionalXYGrid(draw, r, scale=scale, fn=fn, shift=shift1, rgba=(202,200,20,150), extent=20,
rgba2=(202,200,20,75), saperatingPlane=np.array([1,1,sep]))
draw_circle_x_y(draw, r, center=np.array([0,0]), radius=np.sqrt(1/base_coeff), scale=scale)
draw_circle_x_y(draw, r, center=np.array([0,0]), radius=np.sqrt(1/base_coeff),
scale=scale, shift=shift1)
paraboloid_intersection(draw, r, sep, sep, i*base_coeff, i, scale=scale, start_line=start_line)
im.save(basedir + "im" + str(i1) + ".png")
def draw_paraboloidsV2(scale=20.0, ind=0):
sep = 8
base_coeff = 0.02
start_line = -12.0
r1 = np.eye(4)
j = 21
r = rotation(3, np.pi/30*j)
r1[:3,:3] = r
shift = np.array([1000.0, 1000.0, 0.0])
for i1 in range(2,3):
pt_orig = -9.0*np.array([np.cos(np.pi*6.0/15.0), np.sin(np.pi*6.0/15.0), 0])
for ind in range(15):
#i = 2.5*np.sin(i1*np.pi/10.0)
i = 2.0
c = i*base_coeff
im = Image.new("RGB", (2048, 2048), (1, 1, 1))
draw = ImageDraw.Draw(im, 'RGBA')
fn = lambda x, y : paraboloid(x, y, coeff=i*base_coeff, intercept=i)
cfn = lambda x, y : 0.0
drawFunctionalXYGrid(draw, r, scale=scale, fn=fn, extent=30, rgba2=(0,255,0,40),
saperatingPlane=np.array([-1,-1,sep]))
shift1 = np.dot(r, np.array([sep,sep,0.0]))*scale + np.array([1000.0, 1000.0, 0.0])
drawFunctionalXYGrid(draw, r, scale=scale, fn=fn, shift=shift1, rgba=(202,200,20,150),
extent=30, rgba2=(202,200,20,40), saperatingPlane=np.array([1,1,sep]))
draw_circle_x_y(draw, r, center=np.array([0,0]), radius=np.sqrt(1/base_coeff),
scale=scale)
draw_circle_x_y(draw, r, center=np.array([0,0]), radius=np.sqrt(1/base_coeff),
scale=scale, shift=shift1)
paraboloid_intersection(draw, r, sep, sep, i*base_coeff, i, scale=scale,
start_line=start_line)
render_scene_4d_axis(draw, r1, 4)
#pt_orig = 10.0*np.array([np.cos(np.pi*ind/15.0), np.sin(np.pi*ind/15.0), 0])
pt = pt_orig
z1 = i*base_coeff*(pt[0]**2 + pt[1]**2) - i
z2 = i*base_coeff*((pt[0] - sep)**2 + (pt[1] - sep)**2) - i
pt1 = np.array([pt[0],pt[1],z1])
pt1 = np.dot(r, pt1)*scale+shift
pt2 = np.array([pt[0],pt[1],z2])
pt2 = np.dot(r, pt2)*scale+shift
pt = np.dot(r, pt)*scale+shift
draw.ellipse((pt[0]-5, pt[1]-5, pt[0]+5, pt[1]+5), fill = (255,0,120))
draw.ellipse((pt1[0]-5, pt1[1]-5, pt1[0]+5, pt1[1]+5), fill = (0,255,0))
draw.ellipse((pt2[0]-5, pt2[1]-5, pt2[0]+5, pt2[1]+5), fill = (202,200,20))
draw.line((pt[0], pt[1], pt2[0], pt2[1]), fill="white", width=3)
[x0, y0] = [0, 0]
[x1, y1] = pt_orig[:2]
[a1, b1, d1] = [2*c*(x1-x0), 2*c*(y1-y0), c*(x1-x0)**2 + c*(y1-y0)**2 - i -2*c*(x1-x0)*x1 - 2*c*(y1-y0)*y1]
[x0_1, y0_1] = [sep, sep]
[a2, b2, d2] = [2*c*(x1-x0_1), 2*c*(y1-y0_1), c*(x1-x0_1)**2 + c*(y1-y0_1)**2 \
- i -2*c*(x1-x0_1)*x1 - 2*c*(y1-y0_1)*y1]
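            # a*x + b*y + d are the tangent planes to the two paraboloids at pt_orig,
            # using the same algebra as z_plane_generalized below.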
[line_pt1, line_pt2] = plane_intersection(draw, r, plane1=[a1, b1, d1], plane2=[a2, b2, d2],
x_start=pt_orig[0]-7, x_end=pt_orig[0]+7, scale=scale, shift=shift)
generalizedParaboloidTangent(draw, r, pt_orig[0], pt_orig[1], d=10.0, x0=0, y0=0,
c=c, i=i , scale=scale, shift=shift, line_pt1=line_pt1, line_pt2=line_pt2, rgba=(153,255,102,150))
generalizedParaboloidTangent(draw, r, pt_orig[0], pt_orig[1], d=10.0, x0=sep, y0=sep,
c=c, i=i , scale=scale, shift=shift, line_pt1=line_pt1, line_pt2=line_pt2, rgba=(255,204,102,150))
mat = np.array([[a1, b1],[a2, b2]])
rhs = np.array([-d1, -d2])
pt_orig = np.linalg.solve(mat, rhs)
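            # The solve above picks the (x, y) where both tangent planes reach z = 0,
            # which amounts to a Newton-style step towards the zero-level crossing of
            # the two surfaces; it becomes the query point for the next frame.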
pt_orig = np.append(pt_orig, 0)
pt = np.dot(r, pt_orig)*scale+shift
draw.line((pt[0], pt[1], line_pt1[0], line_pt1[1]))
draw.line((pt[0], pt[1], line_pt2[0], line_pt2[1]))
drawXYGrid(draw, r, meshLen=1.0)
im.save("Images\\RotatingCube\\im" + str(ind) + ".png")
def three_d_parabola(draw, r, r2, scale = 200, shift = np.array([1000,1000,0])):
'''
Draws a curve described by the intersection of a plane with the paraboloid x^2+y^2 = z
params:
r: The rotation matrix the whole scene is rotated by
r2: The rotation matrix that the inetersecting plane is to be rotated by
'''
# Assume you start with the x-z plane
orthogonal_vec = np.array([0,1,0])
orthogonal_vec = np.dot(r2, orthogonal_vec)
b = 1.5
[thetax, thetay, thetaz] = orthogonal_vec
c1 = -thetax/thetaz/2
c2 = -thetay/thetaz/2
c3 = np.sqrt(b + c1**2 + c2**2)
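    # The cutting plane passes through (0, 0, b) with normal (thetax, thetay, thetaz),
    # i.e. z = b - (thetax/thetaz)*x - (thetay/thetaz)*y. Substituting into z = x^2 + y^2
    # and completing the square gives the projected circle (x - c1)^2 + (y - c2)^2 = c3^2,
    # which the two loops below trace as upper and lower half-arcs.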
x_min = max((c1 - abs(c3)),-np.sqrt(3.5))
x_max = min((c1 + abs(c3)),np.sqrt(3.5))
y = c2 + np.sqrt(c3*c3 - (x_min-c1)*(x_min-c1))
pt1 = np.dot(r, [x_min, y, (x_min**2+y**2)]) * scale + shift[:3]
for x in np.arange(x_min, x_max, 0.01):
y = c2 + np.sqrt(c3*c3 - (x-c1)*(x-c1))
pt2 = np.dot(r, [x, y, (x**2 + y**2)]) * scale + shift[:3]
if x**2 + y**2 < 3.5:
draw.line((pt1[0], pt1[1], pt2[0], pt2[1]), fill = (204,102,255), width=5)
pt1 = pt2
y = c2 + np.sqrt(c3*c3 - (x_min-c1)*(x_min-c1))
pt1 = np.dot(r, [x_min, y, (x_min**2+y**2)]) * scale + shift[:3]
for x in np.arange(x_min, x_max, 0.01):
y = c2 - np.sqrt(c3*c3 - (x-c1)*(x-c1))
pt2 = np.dot(r, [x, y, (x**2 + y**2)]) * scale + shift[:3]
if x**2 + y**2 < 3.5:
draw.line((pt1[0], pt1[1], pt2[0], pt2[1]), fill = (204,102,255), width=5)
pt1 = pt2
def plane_intersection(draw, r, plane1=[0,0,0], plane2=[0,0,0], x_start=0, x_end=0, scale=200,
shift=np.array([1000,1000,0])):
[a1,b1,d1] = plane1
[a2,b2,d2] = plane2
x = x_start
y = ((d2-d1) + (a2-a1)*x)/(b1-b2)
z = a1*x+b1*y+d1
pt1 = np.dot(r, np.array([x, y, z])) * scale + shift[:3]
init_pt = pt1
while x <= x_end:
x += 1/10.0
y = ((d2-d1) + (a2-a1)*x)/(b1-b2)
z = a1*x+b1*y+d1
pt2 = np.dot(r, np.array([x, y, z])) * scale + shift[:3]
#draw.line((pt1[0], pt1[1], pt2[0], pt2[1]), fill = "white", width=3)
pt1 = pt2
fin_pt = pt1
return [init_pt, fin_pt]
def paraboloidTangentV2(draw, r, x1, y1, c=1.0, d = 1.0, rgba = (120,80,200,150), scale = 200,
shift = np.array([1000,1000,0]), line_pt1=None, line_pt2=None):
'''
Draws a tangent plane to a paraboloid: x^2+y^2 = z at point given by coordinates (x1, y1)
'''
x2 = x1-d
y2 = y1+d
pt1 = np.dot(r, np.array([x2, y2, c*z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
x2 = x1+d
y2 = y1+d
pt2 = np.dot(r, np.array([x2, y2, c*z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
x2 = x1+d
y2 = y1-d
pt3 = np.dot(r, np.array([x2, y2, c*z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
x2 = x1-d
y2 = y1-d
pt4 = np.dot(r, np.array([x2, y2, c*z_plane(x2, y2, x1, y1)])) * scale + shift[:3]
#draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt3[0], pt3[1]), (pt4[0], pt4[1])], rgba)
sqr1 = [[pt1[0], pt1[1]], [pt2[0], pt2[1]], [pt3[0],pt3[1]], [pt4[0], pt4[1]]]
if line_pt1 is not None:
sqr1.append([line_pt1[0], line_pt1[1]])
if line_pt2 is not None:
sqr1.append([line_pt2[0], line_pt2[1]])
hull = ConvexHull([i[:2] for i in sqr1]).vertices
poly = [(sqr1[i][0], sqr1[i][1]) for i in hull]
draw.polygon(poly, rgba)
def z_plane(x, y, x1, y1):
'''
Returns the z-coordinate of a point on a plane that is tangent to the paraboloid z = x^2 + y^2
'''
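    # At (x1, y1) the surface z = x^2 + y^2 has gradient (2*x1, 2*y1), so the tangent
    # plane is z = 2*x1*(x - x1) + 2*y1*(y - y1) + x1^2 + y1^2, which expands to the
    # expression returned below.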
return 2*x1*x + 2*y1*y - (x1**2 + y1**2)
def generalizedParaboloidTangent(draw, r, x1, y1, d=1.0, x0=0.0, y0=0.0, rgba=(120,80,200,150),
c=1.0, i=0.0,
scale=200, shift=np.array([1000,1000,0]),
line_pt1=None, line_pt2=None, p=1.0):
'''
Draws a tangent plane to a paraboloid: x^2+y^2 = z at point given by coordinates (x1, y1)
'''
x2 = x1-d
y2 = y1+d
pt1 = np.dot(r, np.array([x2, y2, z_plane_generalized(x2, y2, x1, y1, x0, y0, c, i)]))*scale + shift[:3]
x2 = x1+d
y2 = y1+d
pt2 = np.dot(r, np.array([x2, y2, z_plane_generalized(x2, y2, x1, y1, x0, y0, c, i)]))*scale + shift[:3]
x2 = x1+d
y2 = y1-d
pt3 = np.dot(r, np.array([x2, y2, z_plane_generalized(x2, y2, x1, y1, x0, y0, c, i)]))*scale + shift[:3]
x2 = x1-d
y2 = y1-d
pt4 = np.dot(r, np.array([x2, y2, z_plane_generalized(x2, y2, x1, y1, x0, y0, c, i)]))*scale + shift[:3]
#draw.polygon([(pt1[0], pt1[1]), (pt2[0], pt2[1]), (pt3[0], pt3[1]), (pt4[0], pt4[1])], rgba)
sqr1 = [[pt1[0], pt1[1]], [pt2[0], pt2[1]], [pt3[0],pt3[1]], [pt4[0], pt4[1]]]
if line_pt1 is not None:
sqr1.append([line_pt1[0], line_pt1[1]])
if line_pt2 is not None:
sqr1.append([line_pt2[0], line_pt2[1]])
try:
hull = ConvexHull([i[:2] for i in sqr1]).vertices
except:
hull = range(4)
orig_pt = np.dot(r, np.array([x1,y1,z_plane_generalized(x1, y1, x1, y1, x0, y0, c, i)]))*scale+shift[:3]
poly = [(sqr1[i][0]*p+orig_pt[0]*(1-p), sqr1[i][1]*p+orig_pt[1]*(1-p)) for i in hull]
draw.polygon(poly, rgba)
def z_plane_generalized(x, y, x1, y1, x0, y0, c=1.0, i=0.0):
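    # Tangent plane to z = c*((x - x0)^2 + (y - y0)^2) - i at (x1, y1): the gradient is
    # (2*c*(x1 - x0), 2*c*(y1 - y0)), and d collects the constant terms so that the
    # plane passes through the point of tangency.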
d = c*(x1-x0)**2 + c*(y1-y0)**2 - i -2*c*(x1-x0)*x1 - 2*c*(y1-y0)*y1
return 2*c*(x1-x0)*x + 2*c*(y1-y0)*y + d
|
StarcoderdataPython
|
347321
|
#
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from polygraphy import mod
from polygraphy.tools import util as tools_util
from polygraphy.tools.args import (DataLoaderArgs, ModelArgs, OnnxLoaderArgs,
OnnxSaveArgs, OnnxShapeInferenceArgs)
from polygraphy.tools.surgeon.subtool.base import BaseSurgeonSubtool
onnx_backend = mod.lazy_import("polygraphy.backend.onnx")
gs = mod.lazy_import("onnx_graphsurgeon")
class Sanitize(BaseSurgeonSubtool):
"""
Clean up and optimize an ONNX model.
"""
def __init__(self):
super().__init__("sanitize")
self.subscribe_args(ModelArgs(model_required=True, inputs="--override-inputs", model_type="onnx"))
self.subscribe_args(DataLoaderArgs())
self.subscribe_args(OnnxShapeInferenceArgs(default=True, enable_force_fallback=True))
self.subscribe_args(OnnxLoaderArgs(output_prefix=""))
self.subscribe_args(OnnxSaveArgs(infer_shapes=True, required=True))
def add_parser_args(self, parser):
const_fold_args = parser.add_argument_group("Constant Folding", "Options for folding constants")
const_fold_args.add_argument("--fold-constants", help="Fold constants in the graph by computing subgraphs whose values "
"are not dependent on runtime inputs.", action="store_true", default=None)
const_fold_args.add_argument("--num-passes", "--num-const-fold-passes", help="The number of constant folding passes to run. "
"Sometimes, subgraphs that compute tensor shapes may not be foldable in a single pass. "
"If not specified, Polygraphy will automatically determine the number of passes required. ",
type=int, default=None, dest="num_const_fold_passes")
const_fold_args.add_argument("--partitioning", help="Controls how to partition the graph during constant folding: {{"
"'basic': Partition the graph so failures in one part do not affect other parts, "
"'recursive': In addition to partitioning the graph, partition partitions where needed}} ",
choices=["basic", "recursive"], default=None)
const_fold_args.add_argument("--no-fold-shapes", help="Disable folding Shape nodes and subgraphs that operate on shapes",
dest="fold_shapes", default=True, action="store_false")
parser.add_argument("--cleanup", help="Run dead layer removal on the graph. This is generally not required if other options are set. ",
action="store_true", default=False)
super().add_parser_args(parser)
def run_impl(self, args):
# First do all processing that requires an ONNX-GraphSurgeon graph, then do everything
# that operates on the ONNX model. This lets us avoid ONNX-GraphSurgeon import if we don't
# need it.
def do_graph_processing(model):
graph = None
def get_graph():
nonlocal graph
if graph is None:
graph = gs.import_onnx(model)
return graph
user_input_metadata = self.arg_groups[ModelArgs].input_shapes
if user_input_metadata:
graph = get_graph()
graph = tools_util.override_input_shapes(graph, user_input_metadata)
if self.arg_groups[OnnxShapeInferenceArgs].force_fallback:
_, layerwise_meta = self.arg_groups[OnnxShapeInferenceArgs].fallback_inference(model)
graph = get_graph()
tools_util.set_shapes_from_layerwise_meta(graph, layerwise_meta)
if args.cleanup:
graph = get_graph()
graph.cleanup()
if graph is not None:
model = gs.export_onnx(graph)
return model
def do_model_processing(model):
if args.fold_constants:
model = onnx_backend.fold_constants(model, num_passes=args.num_const_fold_passes,
do_shape_inference=self.arg_groups[OnnxShapeInferenceArgs].do_shape_inference,
fold_shapes=args.fold_shapes, partitioning=args.partitioning)
return model
model = super().load_model()
model = do_graph_processing(model)
model = do_model_processing(model)
super().save_model(model)
|
StarcoderdataPython
|
5078474
|
# features/dev_florian/wpfconstanst.py
# -*- coding: utf-8 -*-
'''
Created on 2017-07-24
@author: <NAME>
'''
import win32con
constants = {
"WS_OVERLAPPED": win32con.WS_OVERLAPPED,
"WS_POPUP": win32con.WS_POPUP,
"WS_CHILD": win32con.WS_CHILD,
"WS_MINIMIZE": win32con.WS_MINIMIZE,
"WS_VISIBLE": win32con.WS_VISIBLE,
"WS_DISABLED": win32con.WS_DISABLED,
"WS_CLIPSIBLINGS": win32con.WS_CLIPSIBLINGS,
"WS_CLIPCHILDREN": win32con.WS_CLIPCHILDREN,
"WS_MAXIMIZE": win32con.WS_MAXIMIZE,
"WS_BORDER": win32con.WS_BORDER,
"WS_DLGFRAME": win32con.WS_DLGFRAME,
"WS_VSCROLL": win32con.WS_VSCROLL,
"WS_HSCROLL": win32con.WS_HSCROLL,
"WS_SYSMENU": win32con.WS_SYSMENU,
"WS_THICKFRAME": win32con.WS_THICKFRAME,
"WS_GROUP": win32con.WS_GROUP,
"WS_TABSTOP": win32con.WS_TABSTOP,
"WS_MINIMIZEBOX": win32con.WS_MINIMIZEBOX,
"WS_MAXIMIZEBOX": win32con.WS_MAXIMIZEBOX,
"WS_CAPTION": win32con.WS_CAPTION,
"WS_TILED": win32con.WS_TILED,
"WS_ICONIC": win32con.WS_ICONIC,
"WS_SIZEBOX": win32con.WS_SIZEBOX,
"WS_TILEDWINDOW": win32con.WS_TILEDWINDOW,
"WS_OVERLAPPEDWINDOW": win32con.WS_OVERLAPPEDWINDOW,
"WS_POPUPWINDOW": win32con.WS_POPUPWINDOW,
"WS_CHILDWINDOW": win32con.WS_CHILDWINDOW,
}
def check(input):
result = ""
for key,value in constants.iteritems():
if (input & value) == abs(value):
result += key + "\r\n"
return result
exconstants = {
"WS_EX_DLGMODALFRAME": win32con.WS_EX_DLGMODALFRAME,
"WS_EX_NOPARENTNOTIFY": win32con.WS_EX_NOPARENTNOTIFY,
"WS_EX_TOPMOST": win32con.WS_EX_TOPMOST,
"WS_EX_ACCEPTFILES": win32con.WS_EX_ACCEPTFILES,
"WS_EX_TRANSPARENT": win32con.WS_EX_TRANSPARENT,
"WS_EX_MDICHILD": win32con.WS_EX_MDICHILD,
"WS_EX_TOOLWINDOW": win32con.WS_EX_TOOLWINDOW,
"WS_EX_WINDOWEDGE": win32con.WS_EX_WINDOWEDGE,
"WS_EX_CLIENTEDGE": win32con.WS_EX_CLIENTEDGE,
"WS_EX_CONTEXTHELP": win32con.WS_EX_CONTEXTHELP,
"WS_EX_RIGHT": win32con.WS_EX_RIGHT,
"WS_EX_LEFT": win32con.WS_EX_LEFT,
"WS_EX_RTLREADING": win32con.WS_EX_RTLREADING,
"WS_EX_LTRREADING": win32con.WS_EX_LTRREADING,
"WS_EX_LEFTSCROLLBAR": win32con.WS_EX_LEFTSCROLLBAR,
"WS_EX_RIGHTSCROLLBAR": win32con.WS_EX_RIGHTSCROLLBAR,
"WS_EX_CONTROLPARENT": win32con.WS_EX_CONTROLPARENT,
"WS_EX_STATICEDGE": win32con.WS_EX_STATICEDGE,
"WS_EX_APPWINDOW": win32con.WS_EX_APPWINDOW,
"WS_EX_LAYERED": win32con.WS_EX_LAYERED,
"WS_EX_COMPOSITED": win32con.WS_EX_COMPOSITED,
"WS_EX_NOACTIVATE": win32con.WS_EX_NOACTIVATE,
"WS_EX_NOINHERITLAYOUT": win32con.WS_EX_NOINHERITLAYOUT,
"WS_EX_NOPARENTNOTIFY": win32con.WS_EX_NOPARENTNOTIFY,
"WS_EX_OVERLAPPEDWINDOW": win32con.WS_EX_OVERLAPPEDWINDOW,
"WS_EX_PALETTEWINDOW": win32con.WS_EX_PALETTEWINDOW,
}
def check2(input):
result = ""
for key,value in exconstants.iteritems():
if (input & value) == abs(value):
result += key + "\r\n"
return result
|
StarcoderdataPython
|
343320
|
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
import dace.library
import dace.properties
import dace.sdfg.nodes
from dace.transformation.transformation import ExpandTransformation
from .. import environments
@dace.library.expansion
class ExpandAllgatherMPI(ExpandTransformation):
environments = [environments.mpi.MPI]
@staticmethod
def expansion(node, parent_state, parent_sdfg, n=None, **kwargs):
(inbuffer, in_count_str), (outbuffer, out_count_str) = node.validate(parent_sdfg, parent_state)
in_mpi_dtype_str = dace.libraries.mpi.utils.MPI_DDT(inbuffer.dtype.base_type)
out_mpi_dtype_str = dace.libraries.mpi.utils.MPI_DDT(outbuffer.dtype.base_type)
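        # MPI_Allgather: every rank contributes in_count elements and receives the
        # concatenation from all ranks, so each rank's receive count is the total
        # output size divided by the communicator size.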
if inbuffer.dtype.veclen > 1:
raise (NotImplementedError)
code = f"""
int _commsize;
MPI_Comm_size(MPI_COMM_WORLD, &_commsize);
MPI_Allgather(_inbuffer, {in_count_str}, {in_mpi_dtype_str},
_outbuffer, {out_count_str}/_commsize, {out_mpi_dtype_str},
MPI_COMM_WORLD);
"""
tasklet = dace.sdfg.nodes.Tasklet(node.name,
node.in_connectors,
node.out_connectors,
code,
language=dace.dtypes.Language.CPP)
return tasklet
@dace.library.node
class Allgather(dace.sdfg.nodes.LibraryNode):
# Global properties
implementations = {
"MPI": ExpandAllgatherMPI,
}
default_implementation = "MPI"
def __init__(self, name, *args, **kwargs):
super().__init__(name, *args, inputs={"_inbuffer"}, outputs={"_outbuffer"}, **kwargs)
def validate(self, sdfg, state):
"""
:return: A three-tuple inbuffer, outbuffer of the data descriptors in the
parent SDFG.
"""
inbuffer, outbuffer = None, None
for e in state.out_edges(self):
if e.src_conn == "_outbuffer":
outbuffer = sdfg.arrays[e.data.data]
for e in state.in_edges(self):
if e.dst_conn == "_inbuffer":
inbuffer = sdfg.arrays[e.data.data]
in_count_str = "XXX"
out_count_str = "XXX"
for _, src_conn, _, _, data in state.out_edges(self):
if src_conn == '_outbuffer':
dims = [str(e) for e in data.subset.size_exact()]
out_count_str = "*".join(dims)
for _, _, _, dst_conn, data in state.in_edges(self):
if dst_conn == '_inbuffer':
dims = [str(e) for e in data.subset.size_exact()]
in_count_str = "*".join(dims)
return (inbuffer, in_count_str), (outbuffer, out_count_str)
|
StarcoderdataPython
|
8137164
|
import os
import json
from pathlib import Path
import colorful
def load_theme():
try:
theme_name = os.environ['PURE_THEME']
except KeyError:
theme_name = 'tomorrow'
finally:
theme_path = Path(os.getcwd() + '/pure/theme/' + theme_name + '.json')
with open(str(theme_path), 'r') as theme:
scheme = json.load(theme)
colorful.use_true_colors()
colorful.use_palette(scheme)
return theme_name, scheme
def style(color):
return getattr(colorful, color).style[0]
|
StarcoderdataPython
|
3327769
|
from util import d
class BaseGamePad(object):
def __init__(self):
pass
def getKeys(self):
raise Exception("getKeys must be overriden!")
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def close(self):
pass
def setLights(self, data):
pass
def setLightsOff(self, count):
pass
|
StarcoderdataPython
|
9762271
|
# model/migrations/0012_auto_20200826_0703.py
# Generated by Django 3.0.7 on 2020-08-26 07:03
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('model', '0011_auto_20200824_0753'),
]
operations = [
migrations.RenameModel(
old_name='Conferences',
new_name='Conference',
),
]
|
StarcoderdataPython
|
9733427
|
# -*- coding: utf-8 -*-
from django_dynamic_fixture import new
from django.conf import settings
from django.db import connection
from django import db
from django_dynamic_fixture.django_helper import get_models_of_an_app, is_model_managed, get_unique_model_name, get_apps
class Report(object):
def __init__(self):
self.data = []
self.errors = []
def add_record(self, app, model, queries_insert, queries_update):
self.data.append((app, model, queries_insert, queries_update))
def add_error(self, msg):
self.errors.append(msg)
def export_csv(self, order_by_quantity_queries=False):
if order_by_quantity_queries:
self.data.sort(key=lambda t: t[2], reverse=True)
print 'APP.MODEL;QUERIES ON INSERT;QUERIES ON UPDATE'
for app, model, queries_insert, queries_update in self.data:
print '%s;%s;%s' % (get_unique_model_name(model), queries_insert, queries_update)
for err in self.errors:
print err
class CountQueriesOnSave(object):
def __init__(self):
self.report = Report()
def count_queries_for_model(self, app, model):
try:
model_instance = new(model, print_errors=False)
except Exception as e:
self.report.add_error('- Could not prepare %s: %s' % (get_unique_model_name(model), str(e)))
return
db.reset_queries()
try:
model_instance.save()
except Exception as e:
self.report.add_error('- Could not insert %s: %s' % (get_unique_model_name(model), str(e)))
return
queries_insert = len(connection.queries)
db.reset_queries()
try:
model_instance.save()
except Exception as e:
self.report.add_error('- Could not update %s: %s' % (get_unique_model_name(model), str(e)))
return
queries_update = len(connection.queries)
self.report.add_record(app, model, queries_insert, queries_update)
def execute(self, app_labels=[], exclude_app_labels=[]):
settings.DEBUG = True
apps = get_apps(application_labels=app_labels, exclude_application_labels=exclude_app_labels)
for app in apps:
models = get_models_of_an_app(app)
for model in models:
if not is_model_managed(model):
continue
self.count_queries_for_model(app, model)
return self.report
|
StarcoderdataPython
|
11396935
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
#
# Copyright 2014,2018 <NAME> <<EMAIL>>
#
# This file is part of MFRC522-Python
# MFRC522-Python is a simple Python implementation for
# the MFRC522 NFC Card Reader for the Raspberry Pi.
#
# MFRC522-Python is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MFRC522-Python is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MFRC522-Python. If not, see <http://www.gnu.org/licenses/>.
import RPi.GPIO as GPIO
import MFRC522
import signal
continue_reading = True
# function to perform cleanup when the script is aborted
def end_read(signal,frame):
global continue_reading
print("Ctrl+C captured, ending read.")
continue_reading = False
GPIO.cleanup()
signal.signal(signal.SIGINT, end_read)
# Create an object of class MFRC522
MIFAREReader = MFRC522.MFRC522()
# Continue looking for cards till the script is manually aborted
while continue_reading:
# Search for NFC cards
(status,TagType) = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
# Check if card detected
if status == MIFAREReader.MI_OK:
print("Card detected")
# get the data of the card that been touched
(status,uid) = MIFAREReader.MFRC522_Anticoll()
# If UID received, continue
if status == MIFAREReader.MI_OK:
# Print UID in console
print("Card read UID: %s,%s,%s,%s" % (uid[0], uid[1], uid[2], uid[3]))
# Standard encryption key for the NFC card (default)
key = [<KEY>
MIFAREReader.MFRC522_SelectTag(uid)
# Authorization
status = MIFAREReader.MFRC522_Auth(MIFAREReader.PICC_AUTHENT1A, 8, key, uid)
print("\n")
# Check if authenticated
if status == MIFAREReader.MI_OK:
                # This is the 16-byte block of data that we want to write to the NFC card
                data = [99, 11, 55, 66, 44, 111, 222, 210, 125, 153, 136, 199, 144, 177, 166, 188]
                print("Sector 8 looked like this:")
                # Read block 8
                MIFAREReader.MFRC522_Read(8)
                print("\n")
                print("Sector 8 will now be overwritten with the new data:")
# Write data into the NFC card
MIFAREReader.MFRC522_Write(8, data)
print("\n")
print("It now looks like this:")
                # Check what the card looks like after it has been written
MIFAREReader.MFRC522_Read(8)
print("\n")
MIFAREReader.MFRC522_StopCrypto1()
                # Stop reading once the card has been written
continue_reading = False
else:
print("Authentification error")
|
StarcoderdataPython
|
6562858
|
<gh_stars>1000+
import ruia
from lxml import etree
HTML = """
<body>
<div class="title" href="/">Ruia Documentation</div>
<ul>
<li class="tag" href="./easy.html">easy</li>
<li class="tag" href="./fast.html">fast</li>
<li class="tag" href="./powerful.html">powerful</li>
</ul>
</body>
"""
html = etree.HTML(HTML)
def test_element_field():
ul = ruia.ElementField(css_select="ul")
assert len(ul.extract(html_etree=html).xpath('//li')) == 3
def test_text_field():
title = ruia.TextField(css_select=".title", default="Untitled")
assert title.extract(html_etree=html) == "Ruia Documentation"
tags = ruia.TextField(css_select=".tag", default="No tag", many=True)
assert tags.extract(html_etree=html) == ["easy", "fast", "powerful"]
def test_attr_field():
title = ruia.AttrField(css_select=".title", attr="href", default="Untitled")
assert title.extract(html_etree=html) == "/"
tags = ruia.AttrField(css_select=".tag", attr="href", default="No tag", many=True)
assert tags.extract(html_etree=html)[0] == "./easy.html"
def test_html_field():
title = ruia.HtmlField(css_select=".title", default="Untitled")
assert (
title.extract(html_etree=html)
== '<div class="title" href="/">Ruia Documentation</div>\n'
)
tags = ruia.HtmlField(css_select=".tag", default="No tag", many=True)
assert (
tags.extract(html_etree=html)[1]
== '<li class="tag" href="./fast.html">fast</li>\n '
)
def test_regex_field():
title = ruia.RegexField(re_select='<div class="title" href="(.*?)">(.*?)</div>')
assert title.extract(html=HTML)[0] == "/"
assert title.extract(html=HTML)[1] == "Ruia Documentation"
tags = ruia.RegexField(
re_select='<li class="tag" href="(?P<href>.*?)">(?P<text>.*?)</li>', many=True
)
result = tags.extract(html=HTML)
assert isinstance(result, list)
assert len(result) == 3
assert isinstance(result[0], dict)
assert result[0]["href"] == "./easy.html"
|
StarcoderdataPython
|
9771864
|
<filename>lymph/tests/integration/test_zookeeper_discovery.py<gh_stars>10-100
import gevent
from kazoo.client import KazooClient
from kazoo.handlers.gevent import SequentialGeventHandler
from lymph.core.decorators import rpc
from lymph.core.interfaces import Interface
from lymph.discovery.zookeeper import ZookeeperServiceRegistry
from lymph.events.null import NullEventSystem
from lymph.testing import LymphIntegrationTestCase
class Upper(Interface):
service_type = 'upper'
@rpc()
def upper(self, text=None):
return text.upper()
class ZookeeperIntegrationTest(LymphIntegrationTestCase):
use_zookeeper = True
def setUp(self):
super(ZookeeperIntegrationTest, self).setUp()
self.events = NullEventSystem()
self.upper_container, interface = self.create_container(Upper, 'upper')
self.lymph_client = self.create_client()
def create_registry(self, **kwargs):
zkclient = KazooClient(self.hosts, handler=SequentialGeventHandler())
return ZookeeperServiceRegistry(zkclient)
def test_lookup(self):
service = self.lymph_client.container.lookup('upper')
self.assertEqual(len(service), 1)
self.assertEqual(list(service)[0].endpoint, self.upper_container.endpoint)
def test_upper(self):
reply = self.lymph_client.request('upper', 'upper.upper', {'text': 'foo'})
self.assertEqual(reply.body, 'FOO')
def test_ping(self):
reply = self.lymph_client.request('upper', 'lymph.ping', {'payload': 42})
self.assertEqual(reply.body, 42)
def test_status(self):
reply = self.lymph_client.request('upper', 'lymph.status', {})
self.assertEqual(reply.body, {
'endpoint': self.upper_container.endpoint,
'identity': self.upper_container.identity,
})
def test_get_metrics(self):
reply = self.lymph_client.request('upper', 'lymph.get_metrics', {})
self.assertIsInstance(reply.body, list)
def test_connection_loss(self):
service = self.lymph_client.container.lookup('upper')
self.assertEqual(
[i.identity for i in service],
[self.upper_container.identity],
)
self.upper_container.service_registry.client.stop()
self.upper_container.service_registry.client.start()
gevent.sleep(.1) # XXX: give zk a chance to reconnect
self.assertEqual(
[i.identity for i in service],
[self.upper_container.identity],
)
|
StarcoderdataPython
|
9705469
|
# encoding=utf-8
import unittest
from Calc import Calc
class MyTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        print("init Calc before unittest")
        cls.c = Calc()
    # Rename the four methods below to control the print order (unittest runs test methods in alphabetical order).
    # P.S.: test method names must start with 'test'
def test_a_add(self):
print("run add()")
self.assertEqual(self.c.add(1, 2, 12), 15, 'test add fail')
def test_b_sub(self):
print("run sub()")
self.assertEqual(self.c.sub(2, 1, 3), -2, 'test sub fail')
    def test_c_mul(self):
        print("run mul()")
        self.assertEqual(self.c.mul(2, 3, 5), 30, 'test mul fail')
    def test_d_div(self):
        print("run div()")
        self.assertEqual(self.c.div(8, 2, 4), 1, 'test div fail')
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11342377
|
<reponame>derekwacks/RaspberryPiKeithleyCommunication
"""
Adapted from the Keithley user manual to communicate with a Raspberry Pi using PyVISA
7.16.19
<NAME>
"""
import visa
import time
rm = visa.ResourceManager('@py')
address = "ASRL/dev/ttyUSB0::INSTR"
inst = rm.open_resource(address)
inst.write("*RST") print(inst.query("*IDN?"))
inst.write("FORM:DATA ASCII")
inst.write(“:SYSt:BEEP 100, 3“)
inst.close()
|
StarcoderdataPython
|
32169
|
<reponame>danieltes/tp_solver
import uuid
from PIL import Image
import graphviz as gv
styles = {
'graph': {
'label': 'Discreta - Representación de AST',
'fontsize': '16',
'fontcolor': 'white',
'bgcolor': '#333333',
},
'nodes': {
'fontname': 'Helvetica',
'shape': 'hexagon',
'fontcolor': 'white',
'color': 'white',
'style': 'filled',
'fillcolor': '#006699',
},
'edges': {
'style': 'dashed',
'color': 'white',
'arrowhead': 'open',
'fontname': 'Courier',
'fontsize': '12',
'fontcolor': 'white',
}
}
def _render_children(g, n, parent=None):
id = str(uuid.uuid1())
if n.op is not None:
g.node(id, n.op)
if parent is not None:
g.edge(parent, id)
for each in n.children:
_render_children(g, each, id)
else:
g.node(id, n.value)
g.edge(parent, id)
def _set_styles(graph):
graph.graph_attr.update(
('graph' in styles and styles['graph']) or {}
)
graph.node_attr.update(
('nodes' in styles and styles['nodes']) or {}
)
graph.edge_attr.update(
('edges' in styles and styles['edges']) or {}
)
return graph
def render_tree(tree):
graph = gv.Digraph(
format='jpg', comment="Arbol de representación semántico")
_set_styles(graph)
_render_children(graph, tree)
filename = graph.render("ast", view=True)
|
StarcoderdataPython
|
5059887
|
<reponame>molguin92/gabriel-lego-py3
from numpy import array
# Automatically generated task with 45 steps
# Labels: nothing:0, white:1, green:2, yellow:3, red:4, blue:5, black:6,
# unsure:7
bitmaps = \
[array([[4, 4, 4, 4, 4, 4]]),
array([[3, 3, 3, 3, 3, 3],
[4, 4, 4, 4, 4, 4]]),
array([[4, 4, 4, 4, 4, 4]]),
array([[2, 0, 0, 0, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[2, 0, 0, 3, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 0, 3, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 0, 3, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 0, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 2, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 3, 3, 3],
[2, 5, 2, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[4, 0, 3, 3, 3, 3],
[2, 5, 2, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[4, 5, 3, 3, 3, 3],
[2, 5, 2, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[4, 0, 3, 3, 3, 3],
[2, 5, 2, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 3, 3, 3],
[2, 5, 2, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 2, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 0, 3, 5, 4],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 0, 3, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[2, 5, 0, 3, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[2, 0, 0, 3, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[2, 0, 0, 0, 0, 0],
[4, 4, 4, 4, 4, 4]]),
array([[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 0, 0, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 0, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 5, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 5, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 5, 5, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 5, 3, 5, 5, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 5, 5, 5],
[4, 4, 4, 4, 4, 4]]),
array([[3, 0, 3, 5, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 5, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 0, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 0, 0, 5, 0],
[4, 4, 4, 4, 4, 4]]),
array([[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 0, 0, 2, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 0, 2, 0],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 3, 0, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 4, 3, 0, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[4, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 0, 0, 4, 4, 4],
[4, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 2, 0, 4, 4, 4],
[4, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 2, 2, 4, 4, 4],
[4, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[5, 2, 2, 4, 4, 4],
[4, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 2, 2, 4, 4, 4],
[4, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]]),
array([[0, 2, 0, 4, 4, 4],
[4, 4, 3, 4, 2, 5],
[4, 4, 4, 4, 4, 4]])]
|
StarcoderdataPython
|
8056152
|
<reponame>dwhitte7/ntap-automation<filename>storagegrid/python/01_get_grid_details.py<gh_stars>0
#!/usr/bin/env python3
################################################################################
#
# Title: 01_get_grid_details.py
# Author: <NAME>
# Date: 2020-03-17
# Description: Get grid information
#
# Resources:
#
# URLs:
#
################################################################################
import json, os, sys
import requests
### Define Functions
def _url(path):
return 'https://' + global_vars["SG_ADMIN_NODE"] + '/api/v3' + path
def get_info(endpoint, auth_token):
return requests.get(endpoint,
headers={'accept': 'application/json', 'authorization': 'Bearer {}'.format(auth_token)},
verify=False).json()['data']
### Step 1 - Read in global variables
with open(os.path.dirname(sys.argv[0])+'/../global.vars') as json_file:
global_vars = json.load(json_file)
### Step 2 - Set Authorization Header
auth_body = {
"username": global_vars['SG_ADMIN_USER'],
"password": global_<PASSWORD>['<PASSWORD>'],
"cookie": "false",
"csrfToken": "false"
}
### Step 3 - Get Grid Authorization Token
grid_auth = requests.post('https://' + global_vars["SG_ADMIN_NODE"] + '/api/v3/authorize',
data=json.dumps(auth_body),
headers={'Content-Type':'application/json', 'accept':'application/json'},
verify=False).json()['data']
### Step 4 - Get Grid Info - Version & Topology
version_resp = get_info(_url('/grid/config/product-version'), grid_auth)
topology_resp = get_info(_url('/grid/health/topology'), grid_auth)
### Step 5 - Print Info
print('\n{0:13}{1}'.format('Name:', topology_resp['name']))
print('{0:13}{1}\n'.format('Version:', version_resp['productVersion']))
for site in topology_resp['children']:
print('Data Center: {}'.format(site['name']))
for node in site['children']:
print(' - Node Name: {0:20} Type: {1}'.format(node['name'], node['type']))
print('\n')
|
StarcoderdataPython
|
1896146
|
<filename>ros/src/twist_controller/twist_controller.py<gh_stars>0
from yaw_controller import YawController
from pid import PID
from lowpass import LowPassFilter
import rospy
GAS_DENSITY = 2.858
ONE_MPH = 0.44704
class Controller(object):
def __init__(self,
wheel_base,
steer_ratio,
min_speed,
max_lat_accel,
max_steer_angle,
accel_limit,
decel_limit,
loop_frequency,
vehicle_mass,
wheel_radius):
self.wheel_base = wheel_base
self.steer_ratio = steer_ratio
self.max_steer_angle = max_steer_angle
self.vehicle_mass = vehicle_mass
self.wheel_radius = wheel_radius
self.steering_controller = YawController(wheel_base, steer_ratio, min_speed, max_lat_accel, max_steer_angle)
self.throttle_controller = PID(0.15, 0.0, 0.09, mn=decel_limit, mx=accel_limit)
self.low_pass_filter = LowPassFilter(12.0, 1)
self.last_timestamp = None
def control(self, target_angular_velocity, target_linear_velocity, current_angular_velocity, current_linear_velocity, dbw_enabled):
# Return throttle, brake, steer
if not dbw_enabled:
self.reset()
return 0., 0., 0.
steer = self.steering_controller.get_steering(target_linear_velocity, target_angular_velocity, current_linear_velocity)
throttle = 0.
brake = 0.
current_timestamp = rospy.Time.now()
        if self.last_timestamp is not None:
            dt = (current_timestamp - self.last_timestamp).to_sec()
cte = target_linear_velocity - current_linear_velocity
acceleration = self.throttle_controller.step(cte, dt)
filtvalue = self.low_pass_filter.filt(acceleration)
if self.low_pass_filter.ready:
acceleration = self.low_pass_filter.get()
if acceleration > 0:
throttle = acceleration
else:
brake = self.vehicle_mass * abs(acceleration) * self.wheel_radius
self.last_timestamp = current_timestamp
rospy.loginfo('SENDING - [throttle,brake,steer]:[{:.4f},{:.4f},{:.4f}], [cA,cL]:[{:.4f},{:.4f}]m [tA, tL]:[{:.4f},{:.4f}]'.format(throttle, brake, steer,current_angular_velocity, current_linear_velocity,target_angular_velocity, target_linear_velocity))
return throttle, brake, steer
def reset(self):
"""
Reset the controller's state.
"""
self.throttle_controller.reset()
self.low_pass_filter.reset()
|
StarcoderdataPython
|
1746919
|
import json
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
from pyarrow import fs
def parsePath(path):
parts = path.split('/', 1)
bucket = parts[0] if len(parts) >= 1 else ''
local_path = '/' + parts[1] if len(parts) > 1 else '/'
return bucket, local_path
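# Quick illustration of parsePath (paths shown are hypothetical examples):
#   parsePath('mybucket/data/file.txt') -> ('mybucket', '/data/file.txt')
#   parsePath('mybucket')               -> ('mybucket', '/')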
class BucketRouteHandler(APIHandler):
@tornado.web.authenticated
def get(self, local_path):
bucket, path = parsePath(local_path)
print(bucket, path)
fs_client = fs.LocalFileSystem()
file_info_list = fs_client.get_file_info(
fs.FileSelector(path, recursive=False))
files = []
dirs = []
for info in file_info_list:
if info.type.value == 2:
# File type
files.append({'name': info.base_name, 'ext': info.extension,
'size': info.size, 'mtime': info.mtime.isoformat()})
elif info.type.value == 3:
# Directory type
dirs.append({'name': info.base_name,
'mtime': info.mtime.isoformat()})
self.finish(json.dumps({'files': files, 'dirs': dirs}))
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
route_pattern = url_path_join(base_url, "jupyterlab_filesystem", "bucket")
handlers = [("{}/(.*)".format(route_pattern), BucketRouteHandler)]
web_app.add_handlers(host_pattern, handlers)
|
StarcoderdataPython
|
1896352
|
<gh_stars>1-10
import torch
from transformers import BertTokenizer, BertPreTrainedModel, BertModel, BertConfig, AdamW, get_linear_schedule_with_warmup
from torch import nn
from torch.utils.data import Dataset, DataLoader
from multitask_bert_entitity_classifier import COVID19TaskDataset, TokenizeCollator, make_predictions_on_dataset, split_data_based_on_subtasks
from utils import load_from_pickle, get_multitask_instances_for_valid_tasks, split_multitask_instances_in_train_dev, get_TP_FP_FN, add_marker_for_loss_ignore
import random
import json
import argparse
TEXT_TO_TWEET_ID_PATH = "../test/txt_2_tid.json"
POSSIBLE_BATCH_SIZE=8
IGNORE_TASKS_DICT = {"tested_positive": ["part1.Response", "gender_male", "gender_female", "relation"],
"tested_negative": ["part1.Response", "how_long", "gender_male", "gender_female", "relation"],
"can_not_test": ["part1.Response", "symptoms", "relation"],
"death": ["part1.Response", "symptoms", "relation"],
"cure": ["part1.Response", "opinion"]
}
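# NOTE: LOSS_SCALE is referenced when weighting the per-subtask losses below, but its
# original values are not included in this excerpt; the equal weights here are assumed
# placeholders so the script remains runnable.
LOSS_SCALE = {"part1": 1.0, "others": 1.0}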
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--data_file", help="Path to the pickle file that contains the training instances", type=str, required=True)
parser.add_argument("-t", "--task", help="Event for which we want to train the baseline", type=str, required=True)
parser.add_argument("-s", "--save_path", help="Path to the directory where saved model is.", type=str, required=True)
parser.add_argument("-o", "--output_dir", help="Path to the output directory where we will save all the model results", type=str, required=True)
parser.add_argument("--device", type=str, default="cuda", help="name of the device to be used for training")
parser.add_argument("--sentence_level_classify", dest="sentence_level_classify", action="store_true", default=True)
args = parser.parse_args()
device = args.device="cuda"
args.sentence_level_classify=True
IGNORE_TASKS = IGNORE_TASKS_DICT[args.task]
TASK_TO_TID_KEY = {'tested_positive': 'positive', 'tested_negative': 'negative', 'can_not_test': 'can_not_test',
'cure': 'cure', 'death': 'death'}
text_to_tweetid = json.load(open(TEXT_TO_TWEET_ID_PATH, 'r'))[TASK_TO_TID_KEY[args.task]]
def log_multitask_data_statistics(data, subtasks):
print(f"Total instances in the data = {len(data)}")
pos_counts = {subtask: sum(subtask_labels_dict[subtask][1] for _,_,_,_,_,_,_,subtask_labels_dict in data) for subtask in subtasks}
neg_counts = dict()
for subtask in subtasks:
neg_counts[subtask] = len(data) - pos_counts[subtask]
print(f"Subtask:{subtask:>15}\tPositive labels = {pos_counts[subtask]}\tNegative labels = {neg_counts[subtask]}")
return len(data), pos_counts, neg_counts
class MultiTaskBertForCovidEntityClassification(BertPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob * 2)
self.subtasks = config.subtasks
self.classifier_in_size = config.hidden_size
# For sentence level classification
if args.sentence_level_classify:
self.sentence_classification_att = nn.Linear(config.hidden_size, 1)
self.sentence_classifier_1 = nn.Sequential(nn.Linear(config.hidden_size, 50), nn.Dropout(0.1))
self.sentence_classifier_2 = nn.Linear(50, 2)
self.sent_fuse_skip = nn.Sequential(nn.Linear(2,4), nn.LeakyReLU(), nn.Linear(4,2))
# self.classifier_in_size += 50
# We will create a dictionary of classifiers based on the number of subtasks
self.classifiers = nn.ModuleDict()
for subtask in self.subtasks:
if subtask != "part1.Response":
self.classifiers[subtask] = nn.Linear(self.classifier_in_size, config.num_labels)
self.context_vectors = nn.ModuleDict()
for subtask in self.subtasks:
if subtask != "part1.Response":
self.context_vectors[subtask] = nn.Embedding(1,config.hidden_size)
# self.att_taskwise_mlp[subtask] = nn.Linear(config.
# self.subtask_to_id = {s:i for i,s in enumerate(self.subtasks)}
# self.id_to_subtask = {i:s for s,i in self.subtask_to_id.items()}
# self.ids_long_tensor = torch.LongTensor([i for i in id_to_subtask.keys()])
for task in IGNORE_TASKS:
if task == "part1.Response":
continue
self.classifiers[task].weight.requires_grad = False
self.classifiers[task].bias.requires_grad = False
torch.nn.init.zeros_(self.classifiers[task].weight)
devic = self.classifiers[task].weight.device
self.classifiers[task].bias.data = torch.tensor([10.0, -10.0]).to(devic) # Only predict negative class.
self.init_weights()
self.norm_probs = lambda x: x / x.sum(1).unsqueeze(-1)
def forward(
self,
input_ids,
entity_start_positions,
entity_end_positions,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
label_weight=None
):
assert attention_mask == None
attention_mask = (input_ids != 0) * 1 # No weights for <PAD> input
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
# Sentence level classification:
if args.sentence_level_classify:
# pad_mask = (input_ids != 0) # No weights for <PAD> input
# att_scores = self.sentence_classification_att(outputs[0]) * pad_mask.unsqueeze(-1) # Calculate score
# att_scores = torch.softmax(att_scores, dim=1) # Softmax across the words.
            sentence_pooled = outputs[0][:, 0, :]  # use the [CLS] token hidden state
# sentence_pooled = (outputs[0] * att_scores).sum(1) # Get a single sentence vector for each sentence.
sentence_classifier_feats = self.sentence_classifier_1(sentence_pooled)
sentence_logits = self.sentence_classifier_2(sentence_classifier_feats)
detached_sent_logits = sentence_logits.detach()
sent_log_skip_add = self.sent_fuse_skip(sentence_logits)
# NOTE: outputs[0] has all the hidden dimensions for the entire sequence
# We will extract the embeddings indexed with entity_start_positions
all_output = outputs[0]
all_output = self.dropout(all_output)
# Entity_mask
entity_mask = torch.arange(input_ids.shape[1]).expand(input_ids.shape[0], -1).to(input_ids.device)
entity_mask = (entity_mask >= entity_start_positions[:, 1:2]) & (entity_mask <= entity_end_positions[:, 1:2])
entity_mask = ~ entity_mask.unsqueeze(-1)
# Get logits for each subtask
if args.sentence_level_classify:
logits = dict() # {subtask: self.classifiers[subtask](pooled_output) for subtask in self.classifiers.keys()}
for subtask in self.context_vectors.keys():
att_weights = torch.matmul(all_output, self.dropout(self.context_vectors[subtask].weight.T))
att_weights = att_weights.masked_fill(entity_mask, -1000)
att_weights = torch.softmax(att_weights, 1)
pooled_output = torch.sum(all_output * att_weights, 1)
logits[subtask] = self.classifiers[subtask](pooled_output)
if subtask not in IGNORE_TASKS:
logits[subtask] = sent_log_skip_add + logits[subtask] # self.fuse_classify[subtask](sentence_logits) + logits[subtask]
# else:
# logits = {subtask: torch.softmax(self.classifiers[subtask](pooled_output), 1) for subtask in self.classifiers.keys()}
if args.sentence_level_classify:
logits["part1.Response"] = sentence_logits
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = nn.CrossEntropyLoss(reduction='none')
loss = 0
for i, subtask in enumerate(self.subtasks):
if (args.sentence_level_classify and subtask == "part1.Response") or subtask not in IGNORE_TASKS:
# if loss == None:
# this_loss = loss_fct(logits[subtask].view(-1, self.num_labels) , labels[subtask].view(-1)) * label_weight[subtask]
# if subtask == "part1.Response":
# this_loss *= LOSS_SCALE['part1']
# else:
# this_loss *= LOSS_SCALE['others']
# loss = this_loss.mean()
# else:
this_loss = loss_fct(logits[subtask].view(-1, self.num_labels), labels[subtask].view(-1)) * label_weight[subtask]
if subtask == "part1.Response":
this_loss *= LOSS_SCALE['part1']
else:
this_loss *= LOSS_SCALE['others']
loss += this_loss.mean()
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
def get_chunk_tweet_id(data, prediction_scores, THRESHOLD=0.5): # code from get_TP_FP_FN in utils.py
predicted_chunks_for_each_instance = dict()
for (text, chunk, _, _, _, _, _, _, _), prediction_score in zip(data, prediction_scores):
original_text = text
predicted_chunks_for_each_instance.setdefault(original_text, set())
predicted_chunks = predicted_chunks_for_each_instance[original_text]
if prediction_score > THRESHOLD:
# Save this prediction in the predicted chunks
# print(chunk, prediction_score)
predicted_chunks.add(chunk)
predicted_chunks_for_each_instance[original_text] = predicted_chunks
return {text_to_tweetid[text]:preds for text,preds in predicted_chunks_for_each_instance.items()}
def json_save_predicts(dev_pred_chunks, pred_save_path, question_keys_and_tags):
guesses = {}
i = 0
current_subtasks = dev_pred_chunks.keys()
for subtask in current_subtasks:
question = question_keys_and_tags[subtask]
if i == 0:
for id, pred in dev_pred_chunks[subtask].items():
guesses[id] = {question: list(pred)}
else:
for id, pred in dev_pred_chunks[subtask].items():
guesses[id][question] = list(pred)
i += 1
fo = open(pred_save_path, "w")
json.dump(guesses, fo)
fo.close()
print("Saved predicts as JSON:", pred_save_path)
def main_try(args):
task_instances_dict, tag_statistics, question_keys_and_tags = load_from_pickle(args.data_file)
data, subtasks_list = get_multitask_instances_for_valid_tasks(task_instances_dict, tag_statistics)
data = add_marker_for_loss_ignore(data, 1.0 if False else 0.0)
model_name = "digitalepidemiologylab/covid-twitter-bert"
print("\n\n===========\n\n", subtasks_list, "\n\n===========\n\n")
tokenizer = BertTokenizer.from_pretrained(model_name)
config = BertConfig.from_pretrained(model_name)
config.subtasks = subtasks_list
model = MultiTaskBertForCovidEntityClassification.from_pretrained(model_name, config=config)
# Add new tokens in tokenizer
new_special_tokens_dict = {"additional_special_tokens": ["<E>", "</E>", "<URL>", "@USER"]}
tokenizer.add_special_tokens(new_special_tokens_dict)
# Add the new embeddings in the weights
print("Embeddings type:", model.bert.embeddings.word_embeddings.weight.data.type())
print("Embeddings shape:", model.bert.embeddings.word_embeddings.weight.data.size())
embedding_size = model.bert.embeddings.word_embeddings.weight.size(1)
new_embeddings = torch.FloatTensor(len(new_special_tokens_dict["additional_special_tokens"]), embedding_size).uniform_(-0.1, 0.1)
# new_embeddings = torch.FloatTensor(2, embedding_size).uniform_(-0.1, 0.1)
print("new_embeddings shape:", new_embeddings.size())
new_embedding_weight = torch.cat((model.bert.embeddings.word_embeddings.weight.data,new_embeddings), 0)
model.bert.embeddings.word_embeddings.weight.data = new_embedding_weight
print("Embeddings shape:", model.bert.embeddings.word_embeddings.weight.data.size())
# Update model config vocab size
model.config.vocab_size = model.config.vocab_size + len(new_special_tokens_dict["additional_special_tokens"])
model.load_state_dict(torch.load(args.save_path + "ckpt.pth"))
print("loaded_model")
model.to("cuda")
entity_start_token_id = tokenizer.convert_tokens_to_ids(["<E>"])[0]
entity_end_token_id = tokenizer.convert_tokens_to_ids(["</E>"])[0]
print(f"Task dataset for task: {args.task} loaded from {args.data_file}.")
model_config = dict()
results = dict()
# Split the data into train, dev and test and shuffle the train segment
dev_data = data
print("Dev Data:")
total_dev_size, pos_subtasks_dev_size, neg_subtasks_dev_size = log_multitask_data_statistics(dev_data, model.subtasks)
model_config["dev_data"] = {"size":total_dev_size, "pos":pos_subtasks_dev_size, "neg":neg_subtasks_dev_size}
# Extract subtasks data for dev and test
dev_subtasks_data = split_data_based_on_subtasks(dev_data, model.subtasks)
# Load the instances into pytorch dataset
dev_dataset = COVID19TaskDataset(dev_data)
tokenize_collator = TokenizeCollator(tokenizer, model.subtasks, entity_start_token_id, entity_end_token_id)
dev_dataloader = DataLoader(dev_dataset, batch_size=POSSIBLE_BATCH_SIZE, collate_fn=tokenize_collator)
print("Created dev dataloaders with batch aggregation")
dev_predicted_labels, dev_prediction_scores, dev_gold_labels = make_predictions_on_dataset(dev_dataloader, model, device, args.task + "_dev", True)
# print(dev_predicted_labels['age'][0], dev_prediction_scores['age'][0], dev_gold_labels['age'][0])
assert dev_predicted_labels.keys() == dev_prediction_scores.keys()
assert dev_predicted_labels.keys() == dev_gold_labels.keys()
for st in dev_gold_labels.keys():
print(st,":", len(dev_predicted_labels[st]), len(dev_prediction_scores[st]), len(dev_gold_labels[st]))
dev_threshold = json.load(open(args.save_path + "/results.json", "r"))['best_dev_threshold']
print(dev_threshold)
# [print(k, v) for k,v in get_chunk_tweet_id(dev_subtasks_data['age'], dev_prediction_scores['age'], dev_threshold['age']).items()]
dev_pred_chunks = {}
for subtask in subtasks_list:
if subtask not in IGNORE_TASKS:
dev_pred_chunks[subtask] = get_chunk_tweet_id(dev_subtasks_data[subtask], dev_prediction_scores[subtask],
dev_threshold[subtask])
json_save_predicts(dev_pred_chunks, args.output_dir + "/" + args.task +".json",
{k:v for k,v in question_keys_and_tags})
collect_TP_FP_FN = {"TP": 0.0001, "FP": 0.0001, "FN": 0.0001}
for subtask in model.subtasks:
dev_subtask_data = dev_subtasks_data[subtask]
dev_subtask_prediction_scores = dev_prediction_scores[subtask]
dev_F1, dev_P, dev_R, dev_TP, dev_FP, dev_FN = get_TP_FP_FN(dev_subtask_data, dev_subtask_prediction_scores,
dev_threshold[subtask], task=subtask)
if subtask not in IGNORE_TASKS:
collect_TP_FP_FN["TP"] += dev_TP
collect_TP_FP_FN["FP"] += dev_FP
collect_TP_FP_FN["FN"] += dev_FN
else:
print("IGNORE: ", end = "")
print(f"Subtask:{subtask:>15}\tN={dev_TP + dev_FN}\tF1={dev_F1}\tP={dev_P}\tR={dev_R}\tTP={dev_TP}\tFP={dev_FP}\tFN={dev_FN}")
dev_macro_P = collect_TP_FP_FN["TP"] / (collect_TP_FP_FN["TP"] + collect_TP_FP_FN["FP"])
dev_macro_R = collect_TP_FP_FN["TP"] / (collect_TP_FP_FN["TP"] + collect_TP_FP_FN["FN"])
dev_macro_F1 = (2 * dev_macro_P * dev_macro_R) / (dev_macro_P + dev_macro_R)
print(collect_TP_FP_FN)
print("dev_macro_P:", dev_macro_P, "\ndev_macro_R:", dev_macro_R, "\ndev_macro_F1:", dev_macro_F1,"\n")
main_try(args)
|
StarcoderdataPython
|
50570
|
<gh_stars>1-10
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import copy
STATE_A = 0
STATE_B = 1
STATE_TERMINAL = 2
STATE_START = STATE_A
ACTION_A_RIGHT = 0
ACTION_A_LEFT = 1
EPSILON = 0.1
ALPHA = 0.1
GAMMA = 1
# take 10 actions in B
ACTIONS_B = range(0, 10)
STATE_ACTIONS = [[ACTION_A_RIGHT, ACTION_A_LEFT], ACTIONS_B]
INITIAL_Q = [np.zeros(2), np.zeros(len(ACTIONS_B)), np.zeros(1)]
TRANSITION = [[STATE_TERMINAL, STATE_B], [STATE_TERMINAL] * len(ACTIONS_B)]
def choose_action(state, q_value):
if np.random.binomial(1, EPSILON) == 1:
action = np.random.choice(STATE_ACTIONS[state])
else:
values = q_value[state]
action = np.random.choice([action_ for action_, value_ in enumerate(values) if value_ == np.max(values)])
return action
def take_action(state, action):
if state == STATE_A:
return 0
return np.random.normal(-0.1, 1)
def q_learning(q1, q2=None):
state = STATE_START
left_count = 0
while state != STATE_TERMINAL:
if q2 is None:
action = choose_action(state, q1)
else:
action = choose_action(state, [item1 + item2 for item1, item2 in zip(q1, q2)])
if state == STATE_START and action == ACTION_A_LEFT:
left_count += 1
reward = take_action(state, action)
next_state = TRANSITION[state][action]
if q2 is None:
active_q = q1
target = np.max(active_q[next_state])
else:
if np.random.binomial(1, 0.5) == 1:
active_q = q1
target_q = q2
else:
active_q = q2
target_q = q1
best_action = np.random.choice([action_ for action_, value_ in enumerate(active_q[next_state])
if value_ == np.max(active_q[next_state])])
target = target_q[next_state][best_action]
active_q[state][action] += ALPHA * (reward + GAMMA * target - active_q[state][action])
state = next_state
return left_count
def figure_6_7():
episodes = 300
runs = 100
left_counts_q = np.zeros((runs, episodes))
left_counts_double_q = np.zeros((runs, episodes))
for run in tqdm(range(runs)):
q = copy.deepcopy(INITIAL_Q)
q1 = copy.deepcopy(INITIAL_Q)
q2 = copy.deepcopy(INITIAL_Q)
for ep in range(0, episodes):
left_counts_q[run, ep] = q_learning(q)
left_counts_double_q[run, ep] = q_learning(q1, q2)
left_counts_q = left_counts_q.mean(axis=0)
left_counts_double_q = left_counts_double_q.mean(axis=0)
plt.plot(left_counts_q, label='Q-learning')
plt.plot(left_counts_double_q, label='Double Q-learning')
plt.plot(np.ones(episodes) * 0.05, label='Optimal')
plt.xlabel('Episodes')
plt.ylabel('% left actions from A')
plt.legend()
plt.savefig('./images/figure_6_7.png')
plt.close()
if __name__ == '__main__':
figure_6_7()
|
StarcoderdataPython
|
9789106
|
"""Default core views provided to Websauna."""
|
StarcoderdataPython
|
5185106
|
<reponame>cbf02000/JETEngine
#!/usr/bin/python
# -*- coding: utf-8 -*-
import tornado.ioloop
import tornado.web
import json
import os
import sys
import cgi
import urllib
import urllib2
from flightaware import flightaware
from hotels import hotelsearch
from uber import uber_estimate_time
from nyt_36hours import articlesearch
class NewsHandler(tornado.web.RequestHandler):
def get(self):
self.set_header("Content-Type", 'application/json; charset="utf-8"')
airport_code = self.get_argument('airport', True)
jsontxt = articlesearch(airport_code)
self.write(jsontxt)
class UberHandler(tornado.web.RequestHandler):
def get(self):
self.set_header("Content-Type", 'application/json; charset="utf-8"')
#start_lat, start_long, dest_lat, dest_long
start_lat = self.get_argument('start_lat', True)
start_long = self.get_argument('start_long', True)
dest_lat = self.get_argument('dest_lat', True)
dest_long = self.get_argument('dest_long', True)
jsontxt = uber_estimate_time(start_lat, start_long, dest_lat, dest_long)
self.write(jsontxt)
class HotelHandler(tornado.web.RequestHandler):
def get(self):
self.set_header("Content-Type", 'application/json; charset="utf-8"')
airport_code = self.get_argument('airport', True)
arrival_time = self.get_argument('arrival_time', True)
jsontxt = hotelsearch(airport_code,arrival_time)
self.write(jsontxt)
class FlightHandler(tornado.web.RequestHandler):
def get(self):
self.set_header("Content-Type", 'application/json; charset="utf-8"')
airport_code = self.get_argument('airport', True)
jsontxt = flightaware(airport_code)
#url = "http://www.flightstats.com/go/FlightStatus/flightStatusByAirport.do?airport=SFO&airportQueryDate=2015-11-07&airportQueryTime=16"
#response = urllib2.urlopen(url)
#html = response.read()
self.write(jsontxt)
class MainHandler(tornado.web.RequestHandler):
def get(self):
self.set_header("Content-Type", 'application/json; charset="utf-8"')
lon = self.get_argument('lon', True)
lat = self.get_argument('lat', True)
url = "https://airport.api.aero/airport/nearest/" + lat + "/" + lon + "?user_key=72963f44671d7039fe893d82903bc457"
print url
response = urllib2.urlopen(url)
html = response.read()
jsontxt = html[html.find("airports")+11:-3]
root_o = json.loads(jsontxt)
origCity = root_o["city"]
print origCity
imgurl_orig = "https://ajax.googleapis.com/ajax/services/search/images?v=1.0&q=" + urllib.quote(origCity, safe='') + "&rsz=8"
r_o1 = urllib2.urlopen(imgurl_orig)
root_o1 = json.loads(r_o1.read())
imgurl_o_0 = root_o1["responseData"]["results"][0]["url"]
imgurl_o_1 = root_o1["responseData"]["results"][1]["url"]
imgurl_o_2 = root_o1["responseData"]["results"][2]["url"]
imgAppJson = ', "cityImageURL":["%s","%s","%s"]}' % (imgurl_o_0, imgurl_o_1, imgurl_o_2)
returnJsonText = jsontxt[:-1] + imgAppJson
print returnJsonText
#my_json = json.dumps(jsontxt, ensure_ascii=False)
self.write(returnJsonText)
application = tornado.web.Application([
(r"/nearest_airport.cgi", MainHandler),
(r"/flight.cgi", FlightHandler),
(r"/hotel.cgi", HotelHandler),
(r"/uber.cgi", UberHandler),
(r"/news.cgi", NewsHandler)
])
if __name__ == "__main__":
application.listen(8888)
tornado.ioloop.IOLoop.instance().start()
|
StarcoderdataPython
|
5130568
|
#Define variables and other setup
print("Ejercicio")
Sueldo=0
#Input data
a=int(input("Años de trabajo:"))
b=int(input("Sueldo de trabajo:"))
#Processing
if a>=4 or b<=2000:
    Sueldo=((b*25)/100)+b
else:
    Sueldo=((b*20)/100)+b
#Output data
print("El sueldo de navidad es:", Sueldo)
|
StarcoderdataPython
|
9731600
|
<reponame>adam-funk/pytilt
#!/usr/bin/env python3
import matplotlib
matplotlib.use('Agg')
import argparse
import imghdr
import os
import smtplib
import time
import datetime
from email.message import EmailMessage
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import dates
from subprocess import Popen, PIPE
FIGSIZE = (15, 6)
# https://stackoverflow.com/questions/4931376/generating-matplotlib-graphs-without-a-running-x-server
# https://matplotlib.org/gallery/text_labels_and_annotations/date.html
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html#matplotlib.pyplot.subplots
# https://matplotlib.org/api/dates_api.html#matplotlib.dates.MonthLocator
# https://matplotlib.org/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot
# https://matplotlib.org/tutorials/introductory/pyplot.html
def meanr(x):
# ignore NaN (blank fields in the CSV
return round(np.nanmean(x), 1)
def medianr(x):
# ignore NaN (blank fields in the CSV
return round(np.nanmedian(x), 1)
def get_data(options):
data = pd.read_csv(options.data_file, names=['color', 'epoch', 'iso', 'sg', 'c', 'f', 'n'],
index_col='epoch')
data['time'] = pd.to_datetime(data['iso'])
data['date'] = data['time'].dt.date
data['c'] = round(data['c'], 1)
# aggregated by date
columns = [min, meanr, medianr, max]
date_data = data.groupby('date').agg({'sg': columns,
'c': columns}).rename(columns={'meanr': 'mean', 'medianr': 'mdn'})
return data, date_data
def make_plots(options, data, data_by_date):
output_dir = '/tmp/hydrometer-plots-%i' % int(time.time())
os.mkdir(output_dir)
f0 = os.path.join(output_dir, 'density.png')
f1 = os.path.join(output_dir, 'temperature.png')
f2 = os.path.join(output_dir, 'density_date.png')
f3 = os.path.join(output_dir, 'temperature_date.png')
date_html = data_by_date.to_html()
days_locator = dates.DayLocator(interval=1)
days_format = dates.DateFormatter('%d')
plt.ioff()
fig0, ax0 = plt.subplots(figsize=FIGSIZE)
ax0.xaxis.set_major_locator(days_locator)
ax0.xaxis.set_major_formatter(days_format)
ax0.format_xdata = days_format
ax0.grid(True, which='both')
ax0.plot(data['time'], data['sg'])
plt.savefig(f0, dpi=200)
fig1, ax1 = plt.subplots(figsize=FIGSIZE)
ax1.xaxis.set_major_locator(days_locator)
ax1.xaxis.set_major_formatter(days_format)
ax1.format_xdata = days_format
ax1.grid(True, which='both')
ax1.plot(data['time'], data['c'])
plt.savefig(f1, dpi=200)
fig2, ax2 = plt.subplots(figsize=FIGSIZE)
ax2.xaxis.set_major_locator(days_locator)
ax2.xaxis.set_major_formatter(days_format)
ax2.format_xdata = days_format
ax2.grid(True, which='both')
ax2.plot(data_by_date.index, data_by_date['sg'])
plt.savefig(f2, dpi=200)
fig3, ax3 = plt.subplots(figsize=FIGSIZE)
ax3.xaxis.set_major_locator(days_locator)
ax3.xaxis.set_major_formatter(days_format)
ax3.format_xdata = days_format
ax3.grid(True, which='both')
ax3.plot(data_by_date.index, data_by_date['c'])
plt.savefig(f3, dpi=200)
return date_html, (f0, f1, f2, f3)
def send_mail(message, options):
# https://stackoverflow.com/questions/73781/sending-mail-via-sendmail-from-python
if options.mail_command:
p = Popen(["/usr/sbin/sendmail", "-t", "-oi"], stdin=PIPE)
p.communicate(message.as_bytes())
else:
with smtplib.SMTP('localhost') as s:
            s.send_message(message)
return
oparser = argparse.ArgumentParser(description="Plotter for temperature and humidity log",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
oparser.add_argument("-d", dest="data_file",
required=True,
metavar="CSV",
help="CSV input file")
oparser.add_argument("-m", dest="mail",
action='append',
metavar='<EMAIL>',
help="send mail to this address")
oparser.add_argument("-M", dest='mail_command',
action='store_true',
default=False,
help="use mail command instead of localhost SMTP")
oparser.add_argument("-f", dest="from_mail",
default='<EMAIL>',
metavar='<EMAIL>',
help="send mail from this address")
options = oparser.parse_args()
data, data_by_date = get_data(options)
html, plot_files = make_plots(options, data, data_by_date)
if options.mail:
mail = EmailMessage()
mail.set_charset('utf-8')
mail['To'] = ', '.join(options.mail)
mail['From'] = options.from_mail
mail['Subject'] = 'Hydrometer %s' % datetime.datetime.now().strftime('%a %H:%M')
# https://stackoverflow.com/questions/56711321/addng-attachment-to-an-emailmessage-raises-typeerror-set-text-content-got-an
# accepts a maintype argument if the content is bytes, but not if the content is str
mail.add_attachment(html.encode('utf-8'), disposition='inline',
maintype='text', subtype='html')
# https://docs.python.org/3/library/email.examples.html
for file in plot_files:
with open(file, 'rb') as fp:
img_data = fp.read()
mail.add_attachment(img_data, disposition='inline',
maintype='image',
subtype=imghdr.what(None, img_data))
send_mail(mail, options)
|
StarcoderdataPython
|
3511395
|
<reponame>amanThakral002/charm-kubernetes-dashboard<filename>src/cert.py
# Copyright 2021 Canonical
# See LICENSE file for licensing details.
import datetime
from ipaddress import IPv4Address
from typing import List
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.x509.oid import NameOID
class SelfSignedCert:
"""A class used for generating self-signed RSA TLS certificates"""
def __init__(
self,
names: List[str],
ips: List[IPv4Address] = [],
key_size: int = 2048,
validity: int = 365,
):
"""Initialise a new self-signed certificate.
Args:
names: A list of FQDNs that should be placed in the Subject Alternative
Name field of the certificate. The first name in the list will be
used as the Common Name, Subject and Issuer field.
ips: A list of IPv4Address objects that should be present in the list
of Subject Alternative Names of the certificate.
key_size: Size of the RSA Private Key to be generated. Defaults to 2048
validity: Period in days the certificate is valid for. Default is 365.
Raises:
ValueError: is raised if an empty list of names is provided to the
constructor.
"""
# Ensure that at least one FQDN was provided
# TODO: Do some validation on any provided names
if not names:
raise ValueError("Must provide at least one name for the certificate")
# Create a list of x509.DNSName objects from the list of FQDNs provided
self.names = [x509.DNSName(n) for n in names]
        # Create a list of x509.IPAddress objects from the list of IPv4Addresses
self.ips = [x509.IPAddress(i) for i in ips] if ips else []
# Initialise some values
self.key_size = key_size
self.validity = validity
self.cert = None
self.key = None
# Generate the certificate
self._generate()
def _generate(self) -> None:
"""Generate a self-signed certificate"""
# Generate a new RSA private key
key = rsa.generate_private_key(public_exponent=65537, key_size=self.key_size)
# Set the subject/issuer to the first of the given names
subject = issuer = x509.Name(
[x509.NameAttribute(NameOID.COMMON_NAME, self.names[0].value)]
)
# Build the cert
cert = (
x509.CertificateBuilder()
.subject_name(subject)
.issuer_name(issuer)
.public_key(key.public_key())
.serial_number(x509.random_serial_number())
.not_valid_before(datetime.datetime.utcnow())
.not_valid_after(datetime.datetime.utcnow() + datetime.timedelta(days=self.validity))
.add_extension(
x509.SubjectAlternativeName(self.names + self.ips),
critical=False,
)
.add_extension(
x509.KeyUsage(
digital_signature=True,
key_encipherment=True,
key_cert_sign=False,
key_agreement=False,
content_commitment=False,
data_encipherment=False,
crl_sign=False,
encipher_only=False,
decipher_only=False,
),
critical=True,
)
.add_extension(
x509.ExtendedKeyUsage(
[
x509.oid.ExtendedKeyUsageOID.SERVER_AUTH,
x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH,
]
),
critical=False,
)
.sign(key, hashes.SHA256())
)
self.cert = cert.public_bytes(serialization.Encoding.PEM)
self.key = key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
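# Minimal usage sketch (illustrative only; the hostname and output file names are assumptions):
#
#   cert = SelfSignedCert(names=["dashboard.local"], validity=30)
#   with open("tls.crt", "wb") as f:
#       f.write(cert.cert)
#   with open("tls.key", "wb") as f:
#       f.write(cert.key)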
|
StarcoderdataPython
|
363797
|
<gh_stars>1-10
def airflow_dags():
"""
    Read the DAGs found in Airflow's DAGS_FOLDER (via DagBag) and return their dag ids as a list.
    :return: list of DAG id strings
from airflow import models, settings
dagbag = models.DagBag(settings.DAGS_FOLDER)
return list(dagbag.dags)
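# Illustrative usage (assumes a configured Airflow environment with parseable DAGs):
#
#   if __name__ == "__main__":
#       for dag_id in airflow_dags():
#           print(dag_id)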
|
StarcoderdataPython
|
3330254
|
<filename>in-class-activities/07_Spark/7M_EDA_ML/7M_PySpark_Midway.py
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
# Start Spark Session
spark = SparkSession.builder.getOrCreate()
# Read data
data = spark.read.csv('/project2/macs30123/AWS_book_reviews/*.csv',
header='true',
inferSchema='true')
# Recast columns to correct data type
data = (data.withColumn('star_rating', col('star_rating').cast('int'))
.withColumn('total_votes', col('total_votes').cast('int'))
.withColumn('helpful_votes', col('helpful_votes').cast('int'))
)
# Summarize data by star_rating
stars_votes = (data.groupBy('star_rating')
.sum('total_votes', 'helpful_votes')
.sort('star_rating', ascending=False)
)
# Drop rows with NaN values and then print out resulting data:
stars_votes_clean = stars_votes.dropna()
stars_votes_clean.show()
|
StarcoderdataPython
|
6613867
|
<filename>wow_watches.py
#!/usr/bin/env python
# bhq_query.py - module for sopel to query blade head quarters site for knife data
#
# Copyright (c) 2015,2016 <NAME> <<EMAIL>>
#
# See LICENSE for terms of usage, modification and redistribution.
from sopel import *
@module.commands('wtc')
def knife(bot, trigger):
bot.reply("Look I respond to the wtc command now!")
|
StarcoderdataPython
|
11319309
|
<gh_stars>0
from threading import Timer
class Monitor:
def __init__(self, interval, action, args=None):
args = args if args is not None else []
self.listening = False
self.status = 'pending'
self.timer = Timer(interval, self.timing, [action, args])
def start(self):
if self.listening:
print('Monitor has been start.')
else:
print('Monitor start listening...')
self.listening = True
self.status = 'running'
            # start a new thread
self.timer.start()
def stop(self):
if self.listening:
print('Monitor been closed.')
self.listening = False
self.status = 'pending'
# force stop monitor
self.timer.cancel()
else:
print('Monitor did not start.')
def timing(self, action, args):
print('loop...')
action(*args)
if self.listening:
            # keep looping: the timer waits for the interval and runs the action again
self.timer.run()
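# Illustrative usage sketch (the interval and action are arbitrary examples):
#
#   m = Monitor(2, print, ['tick'])
#   m.start()   # prints 'tick' roughly every 2 seconds on a background thread
#   ...
#   m.stop()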
|
StarcoderdataPython
|
3338356
|
<reponame>matthew-chinn/my-led<filename>original_visualize_spectrum.py
import util
import numpy as np
from scipy.ndimage.filters import gaussian_filter1d
import config
import dsp
r_filt = dsp.ExpFilter(np.tile(0.01, config.N_PIXELS // 2),
alpha_decay=0.2, alpha_rise=0.99)
g_filt = dsp.ExpFilter(np.tile(0.01, config.N_PIXELS // 2),
alpha_decay=0.05, alpha_rise=0.3)
b_filt = dsp.ExpFilter(np.tile(0.01, config.N_PIXELS // 2),
alpha_decay=0.1, alpha_rise=0.5)
common_mode = dsp.ExpFilter(np.tile(0.01, config.N_PIXELS // 2),
alpha_decay=0.99, alpha_rise=0.01)
p_filt = dsp.ExpFilter(np.tile(1, (3, config.N_PIXELS // 2)),
alpha_decay=0.1, alpha_rise=0.99)
p = np.tile(1.0, (3, config.N_PIXELS // 2))
gain = dsp.ExpFilter(np.tile(0.01, config.N_FFT_BINS),
alpha_decay=0.001, alpha_rise=0.99)
_prev_spectrum = np.tile(0.01, config.N_PIXELS // 2)
def effect(y):
    """Effect that maps the Mel filterbank frequencies onto the LED strip"""
    global _prev_spectrum
y = np.copy(util.interpolate(y, config.N_PIXELS // 2))
common_mode.update(y)
diff = y - _prev_spectrum
_prev_spectrum = np.copy(y)
# Color channel mappings
r = r_filt.update(y - common_mode.value)
g = np.abs(diff)
b = b_filt.update(np.copy(y))
# Mirror the color channels for symmetric output
r = np.concatenate((r[::-1], r))
g = np.concatenate((g[::-1], g))
b = np.concatenate((b[::-1], b))
output = np.array([r, g,b]) * 255
return output
|
StarcoderdataPython
|
1947116
|
from uuid import uuid4
from random import choice
from rest_framework.test import APITestCase
from irekua_database.utils import simple_JSON_schema
from irekua_database.models import TermType, EntailmentType, Term
from irekua_rest_api.serializers import entailments
from .utils import (
BaseTestCase,
Users,
Actions,
create_permission_mapping_from_lists
)
class EntailmentTestCase(BaseTestCase, APITestCase):
serializer = entailments.CreateSerializer
permissions = create_permission_mapping_from_lists({
Actions.LIST: Users.ALL_AUTHENTICATED_USERS,
Actions.CREATE: [
Users.ADMIN,
Users.CURATOR],
Actions.RETRIEVE: Users.ALL_AUTHENTICATED_USERS,
Actions.UPDATE: [
Users.ADMIN,
Users.CURATOR],
Actions.PARTIAL_UPDATE: [
Users.ADMIN,
Users.CURATOR],
Actions.DESTROY: [
Users.ADMIN,
Users.CURATOR],
})
def setUp(self):
super().setUp()
term_type_1 = TermType.objects.create(
name=str(uuid4()),
description='Random term type',
is_categorical=True,
metadata_schema=simple_JSON_schema(),
synonym_metadata_schema=simple_JSON_schema())
term_type_2 = TermType.objects.create(
name=str(uuid4()),
description='Random term type',
is_categorical=True,
metadata_schema=simple_JSON_schema(),
synonym_metadata_schema=simple_JSON_schema())
EntailmentType.objects.create(
source_type=term_type_1,
target_type=term_type_2,
metadata_schema=simple_JSON_schema())
self.type_1_terms = [
Term.objects.create(
term_type=term_type_1,
value=str(uuid4()),
description='Random term',
metadata={})
for _ in range(40)]
self.type_2_terms = [
Term.objects.create(
term_type=term_type_2,
value=str(uuid4()),
description='Random term',
metadata={})
for _ in range(40)]
def generate_random_json_data(self):
term_1 = choice(self.type_1_terms)
term_2 = choice(self.type_2_terms)
data = {
'source': term_1.pk,
'target': term_2.pk,
'description': 'Random entailment',
'metadata': {}
}
return data
|
StarcoderdataPython
|
8107328
|
#!/usr/bin/env python
"""Print json to stdout or wherever"""
#
# Copyright 2017-2018 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
from google.protobuf import json_format
from builtins import object
from log_collectors.training_data_service_client import training_data_pb2 as tdp
rindex_regex = re.compile(r"\"rindex\":\s\"([0-9]+)\"")
time_regex = re.compile(r"\"time\":\s\"([0-9]+)\"")
def output(obj: object):
"""Take a EMetrics or LogLine record and print a valid JSON object to stdout"""
json_string = to_string(obj)
print(json_string)
def to_string(obj: object)->str:
"""Take a EMetrics or LogLine record and return a valid JSON string"""
# Call buggy google protobuf function.
json_string = json_format.MessageToJson(obj, indent=0, preserving_proto_field_name=True)
# The rest of this is a bit of a hack. Perhaps I'd be better off just
# processing the typed record and hand-printing the json.
json_string = json_string.replace('\n', ' ').replace('\r', '')
for i in range(1, 10):
json_string = json_string.replace(' ', ' ')
json_string = json_string.replace('{ "', '{"')
json_string = json_string.replace('" }', '"}')
json_string = json_string.replace('"type": "STRING"', '"type": 0')
json_string = json_string.replace('"type": "JSONSTRING"', '"type": 1')
json_string = json_string.replace('"type": "INT"', '"type": 2')
json_string = json_string.replace('"type": "FLOAT"', '"type": 3')
json_string = rindex_regex.sub(r'"rindex": \1', json_string)
json_string = time_regex.sub(r'"time": \1', json_string)
return json_string
|
StarcoderdataPython
|
5131841
|
import datetime
from database.database_schemas import Schemas
from sqlalchemy import Column, ForeignKey, Integer, String, DateTime
from sqlalchemy.dialects.mysql import BIGINT, SMALLINT, DOUBLE, TIMESTAMP, TINYINT
from database.base import Base
class ReferentialConstraints(Base):
"""Maps to REFERENTIAL_CONSTRAINTS table in information databases."""
__tablename__ = 'REFERENTIAL_CONSTRAINTS'
__table_args__ = {'schema': Schemas.information_schema}
    # information_schema declares no primary key; SQLAlchemy requires one, so the
    # constraint-identifying columns are mapped as a composite primary key.
    CONSTRAINT_CATALOG = Column(String, primary_key=True, nullable=False)
    CONSTRAINT_SCHEMA = Column(String, primary_key=True, nullable=False)
    CONSTRAINT_NAME = Column(String, primary_key=True, nullable=False)
UNIQUE_CONSTRAINT_CATALOG = Column(String, nullable=False)
UNIQUE_CONSTRAINT_SCHEMA = Column(String, nullable=False)
UNIQUE_CONSTRAINT_NAME = Column(String)
MATCH_OPTION = Column(String, nullable=False)
UPDATE_RULE = Column(String, nullable=False)
DELETE_RULE = Column(String, nullable=False)
TABLE_NAME = Column(String, nullable=False)
REFERENCED_TABLE_NAME = Column(String, nullable=False)
|
StarcoderdataPython
|
1639375
|
<filename>snipy/io/fileutil.py
# -*- coding: utf-8 -*-
import os
from codecs import open
import scandir
from snipy.ilogging import logg
import numpy as np
def mkdir_if_not(filepath, ispath=False):
"""
    Create the directory part of filepath with mkdir if it does not exist.
    :param filepath: file path
    :return: returns filepath unchanged
"""
if not ispath:
p, _ = os.path.split(filepath)
else:
p = filepath
if not p:
return filepath
if not os.path.exists(p):
# M.info('%s not exist, trying mkdir ', p)
try:
os.makedirs(p)
except FileExistsError as e:
logg.warn(str(e))
return filepath
def readlines(filepath):
"""
read lines from a textfile
:param filepath:
:return: list[line]
"""
with open(filepath, 'rt') as f:
lines = f.readlines()
lines = map(str.strip, lines)
lines = [l for l in lines if l]
return lines
def writelines(filepath, lines):
mkdir_if_not(filepath)
with open(filepath, 'wt') as f:
for l in lines:
f.write(l + '\n')
def readtxt(filepath):
""" read file as is"""
with open(filepath, 'rt') as f:
lines = f.readlines()
return ''.join(lines)
def writetxt(filepath, txt):
mkdir_if_not(filepath)
with open(filepath, 'wt') as f:
f.write(txt)
def savefile(obj, filepath, compress=True):
"""
    Overwrite the file if it already exists.
:param obj:
:param str filepath:
:param compress:
:return:
"""
try:
import cPickle as pickle
except Exception:
import pickle
import joblib
    # Save to a temporary file first, then rename it into place.
tmpfile = filepath + '.tmp'
mkdir_if_not(tmpfile)
if compress:
joblib.dump(obj, tmpfile, compress=3, cache_size=100, protocol=pickle.HIGHEST_PROTOCOL)
else:
joblib.dump(obj, tmpfile, compress=0)
os.rename(tmpfile, filepath)
return obj
def loadfile(filepath, mmap_mode=None):
"""
:param filepath:
:param mmap_mode: {None, ‘r+’, ‘r’, ‘w+’, ‘c’} see. joblib.load
:return:
"""
import joblib
try:
return joblib.load(filepath, mmap_mode=mmap_mode)
except IOError:
return None
def latest_file(fpattern, matchfun=None):
"""
    Return the path of the most recently modified file among those matching fpattern
    (compared by file modification time).
    :param function matchfun: optional extra filter applied to the matched file names
    :param fpattern: file pattern (ex: data/some*.txt)
    :return: e.g. data/somelatest.txt; None means no matching file was found
"""
import glob
# matchfun = matchfun or glob.glob
files = glob.glob(fpattern)
if matchfun:
files = filter(matchfun, files)
latest, maxtime = None, 0
for f in files:
t = os.path.getmtime(f)
if t > maxtime:
latest, maxtime = f, t
return latest
def load_latest(fpattern):
latest = latest_file(fpattern)
if latest is None:
return None
else:
return loadfile(latest)
def load_or_run(filepath, fun, *args, **kwargs):
"""
    Load the cached result from filepath if it exists; otherwise run fun, save the result, and return it.
ex)
res = load_or_run('file_loadorsave', funlongtime, ...., force=False)
:param filepath:
:param fun:
:param force:
:return:
"""
force = kwargs.pop('force', False)
compress = kwargs.pop('compress', True)
    if not filepath.startswith('/') and not filepath.startswith('~'):
filepath = os.path.join('/tmp/snipy/load_or_run/', filepath)
if not force and os.path.exists(filepath):
        # Load the previously saved result
mmap_mode = 'r+' if not compress else None
return loadfile(filepath, mmap_mode=mmap_mode)
res = fun(*args, **kwargs)
savefile(res, filepath, compress=compress)
return res
def readhdf5(f):
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import h5py
warnings.resetwarnings()
return h5py.File(f, 'r')
def getpath(f, isfile=True):
if isfile:
return os.path.dirname(os.path.realpath(f))
else:
return os.path.realpath(f)
# list directory, file, recursive or not, ...
# import scandir
def any_match(fname, patterns, matchfun=None):
"""
ANY matches?
:param str fname: file name
:param list[str] patterns: list of filename pattern. see fnmatch.fnamtch
:rtype: bool
"""
return any(fnmatches(fname, patterns, matchfun))
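# Illustrative example (hypothetical file name): with the default fnmatch matching,
# any_match('notes.txt', ['*.md', '*.txt']) evaluates to True.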
def fnmatches(fname, patterns, matchfun):
""""
matches?
:param fname: file name
:type fname: str
:param patterns: list of filename pattern. see fnmatch.fnamtch
:type patterns: [str]
:rtype: generator of bool
"""
import fnmatch
matchfun = matchfun or fnmatch.fnmatch
for p in patterns:
yield matchfun(fname, p)
def listdir(p, match='*', exclude='', listtype='file', matchfun=None):
"""
list file(or folder) for this path (NOT recursive)
:param p:
:param match:
:param exclude:
    :param listtype: ('file' | 'filepath' | 'dir' | 'dirpath' | 'all')
:param matchfun: match fun (default fnmatch.fnmatch) True/False = matchfun(name, pattern)
:rtype:
"""
if listtype == 'file':
gen = listfile(p)
elif listtype == 'filepath':
gen = listfilepath(p)
elif listtype == 'dir':
gen = listfolder(p)
elif listtype == 'dirpath':
gen = listfolderpath(p)
else: # list file or folder
gen = (entry.name for entry in scandir.scandir(p))
return filter_pattern(gen, match, exclude, matchfun)
def filter_pattern(gen, include='*', exclude='', matchfun=None):
pred = _pred_pattern(include, exclude, matchfun)
for f in gen:
if pred(f):
yield f
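# Hedged usage sketch for listdir/filter_pattern (illustrative directory and
# patterns): listdir yields names lazily, so wrap it in list() to materialize.
def _example_listdir():
    txt_files = list(listdir('.', match='*.txt', listtype='file'))
    visible_dirs = list(listdir('.', match='*', exclude='.*', listtype='dir'))
    return txt_files, visible_dirs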
def listfile(p):
"""
generator of list files in the path.
filenames only
"""
try:
for entry in scandir.scandir(p):
if entry.is_file():
yield entry.name
except OSError:
return
def listfilepath(p):
"""
generator of list files in the path.
filenames only
"""
for entry in scandir.scandir(p):
if entry.is_file():
yield entry.path
def listfolder(p):
"""
generator of list folder in the path.
folders only
"""
for entry in scandir.scandir(p):
if entry.is_dir():
yield entry.name
def listfolderpath(p):
"""
generator of list folder in the path.
folders only
"""
for entry in scandir.scandir(p):
if entry.is_dir():
yield entry.path
def get_match_fun(patterns, patterntype):
import re
    def _fnmatch(fname):
        return any_match(fname, patterns)
if patterntype != 're': # usually just fnmatch
return _fnmatch
patterns = [re.compile(p) for p in patterns]
def _re_match(fname):
for p in patterns:
if p.match(fname):
return True
return False
return _re_match
def _is_str(x):
try:
return isinstance(x, (str, basestring))
except NameError:
return isinstance(x, str)
def _pred_pattern(match='*', exclude='', patterntype='fnmatch'):
""" internal use """
m, x = match, exclude
if m == '*':
if not x:
pred = lambda n: True
else:
x = [x] if _is_str(x) else x
matcher = get_match_fun(x, patterntype)
pred = lambda n: not matcher(n)
else:
m = [m] if _is_str(m) else m
if not x:
matcher = get_match_fun(m, patterntype)
pred = lambda n: matcher(n)
else:
x = [x] if _is_str(x) else x
matcher_m = get_match_fun(m, patterntype)
matcher_x = get_match_fun(x, patterntype)
pred = lambda n: matcher_m(n) and not matcher_x(n)
return pred
def findfolder(toppath, match='*', exclude=''):
"""
recursively find folder path from toppath.
patterns to decide to walk folder path or not
:type toppath: str
:type match: str or list(str)
:type exclude: str or list(str)
:rtype: generator for path str
"""
pred = _pred_pattern(match, exclude)
return (p for p in walkfolder(toppath, pred))
def walkfolder(toppath, pred):
"""
walk folder if pred(foldername) is True
:type toppath: str
:type pred: function(str) => bool
"""
for entry in scandir.scandir(toppath):
if not entry.is_dir() or not pred(entry.name):
continue
yield entry.path
for p in walkfolder(entry.path, pred):
yield p
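# Hedged usage sketch for findfolder/walkfolder (illustrative patterns): collect
# every folder path under the current directory, skipping hidden folders.
def _example_findfolder():
    return list(findfolder('.', match='*', exclude='.*'))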
def tempdir():
import tempfile
d = tempfile.gettempdir()
p = os.path.join(d, 'her_temp')
mkdir_if_not(p, ispath=True)
return p
def tempfolder(prefix=''):
"""임시 폴더를 만들어서 리턴"""
import uuid
p = prefix + str(uuid.uuid4())
d = tempdir()
tmpd = os.path.join(d, p)
return mkdir_if_not(tmpd, ispath=True)
def tempfile(mode, ext='', **kwargs):
import uuid
d = tempdir()
if ext and not ext.startswith('.'):
ext = '.' + ext
fname = os.path.join(d, str(uuid.uuid4()) + ext)
return open(fname, mode, **kwargs)
def renderhtml(template, **kwargs):
# from packageutil import caller_path
from .caller import caller
from jinja2 import Environment, FileSystemLoader
from os.path import dirname
if '/' not in template:
p = dirname(caller.abspath(depth=2))
else:
p, template = os.path.split(template)
j2_env = Environment(loader=FileSystemLoader(p),
trim_blocks=True)
rendered = j2_env.get_template(template).render(**kwargs)
return rendered
def renderimages(images, width=80, height=80, space=0):
import webbrowser
template = os.path.dirname(os.path.realpath(__file__)) + '/template/images.html'
rendered = renderhtml(template, data=images, width=width, height=height, space=space)
tmp = tempfile('wt', '.html')
tmp.write(rendered)
tmp.flush()
webbrowser.open(tmp.name)
return tmp.name
def imsize(fname):
"""
return image size (height, width)
:param fname:
:return:
"""
from PIL import Image
im = Image.open(fname)
return im.size[1], im.size[0]
def imread(fname, size=None, expand=True, dtype='float32', **kwargs):
from skimage import io, transform
import numpy as np
img = io.imread(fname, **kwargs)
image_max = np.iinfo(img.dtype).max
if size is not None:
sz = list(img.shape)
sz[:len(size)] = size
img = transform.resize(img, sz, preserve_range=True)
if dtype.startswith('float'):
# normalize 0 to 1
img = img.astype(dtype) / float(image_max)
else:
img = img.astype(dtype)
if expand:
img = np.expand_dims(img, 0)
if img.ndim == 3:
img = np.expand_dims(img, -1)
return img
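# Hedged usage sketch for imread ('sample.png' is a hypothetical file): with the
# defaults the result is float32 scaled to [0, 1] with a leading batch dimension,
# e.g. shape (1, 64, 64, 3) for an RGB input resized to 64x64.
def _example_imread():
    img = imread('sample.png', size=(64, 64))
    return img.shape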
def imsave(fname, *args, **kwargs):
from skimage import io
mkdir_if_not(fname)
res = io.imsave(fname, *args, **kwargs)
logg.info('image saved to [{}]'.format(fname))
return res
def imread_palette(fname, expand=True, dtype='uint8', mode='r'):
    from PIL import Image
    import numpy as np  # np is used below; imported locally, like in imread above
# use png? https://pythonhosted.org/pypng/png.html ?
img = Image.open(fname, mode=mode)
palette = img.getpalette()
if palette is not None:
img = np.array(img)
        num_colors = len(palette) // 3
image_max = float(np.iinfo(img.dtype).max)
palette = np.array(palette).reshape(num_colors, 3) / image_max
else:
colors = img.convert('RGBA').getcolors()
num_colors = len(colors)
assert num_colors <= 256
palette = [c[:3] for _, c in colors]
im = Image.new('P', img.size)
palette = np.array(palette).reshape((-1))
im.putpalette(palette, rawmode="RGB")
im.paste(img)
        palette = np.array(im.getpalette())
img = np.array(im)
img = img.astype(dtype)
palette = palette.astype('float32')
if expand:
img = np.expand_dims(img, axis=0)
palette = np.expand_dims(palette, axis=0)
return img, palette
def filecopy(src, dst):
import shutil
shutil.copy(src, dst)
def download(url, out=None):
import wget
if out is not None:
mkdir_if_not(out)
logg.info('downloading... [{}] to [{}]'.format(url, out))
f = wget.download(url, out=out)
return f
def download_if_not(url, f):
if not os.path.exists(f):
f = download(url, f)
return f
def unzip(z, member=None):
import zipfile
path = os.path.dirname(z)
zip = zipfile.ZipFile(z, 'r')
if member is None:
zip.extractall(path)
logg.info('unzip [{}] to [{}]'.format(z, path))
else:
zip.extract(member, path)
logg.info('unzip [{}] to [{}]'.format(member, path))
zip.close()
def untar(t, member=None):
import tarfile
path = os.path.dirname(t)
tar = tarfile.open(t)
if member is None:
tar.extractall(path)
logg.info('unzip [{}] to [{}]'.format(t, path))
else:
tar.extract(member, path)
logg.info('unzip [{}] to [{}]'.format(member, path))
tar.close()
def anyfile(pattern):
import glob
return any(glob.glob(pattern))
|
StarcoderdataPython
|
248871
|
from modules.Load import *
from modules.Plotter import *
file=None
# When run directly, file stays None (manual selection of the data source);
# when imported, the fixed test path below is used instead.
if __name__ != "__main__":
file="../../data/2019 06 12/0 degrees/run1/"
run = LoadSingleRun(file)
plotter = MultiPlotter_CreateFromRotaryAndAccel(run["omega"], run["accel"])
plotter.display()
# original code:
#run = LoadRun(None if __name__ == "__main__" else "../../data/2019 06 12/0 degrees/run1/")
#Plot(run["accel"], run["omega"])
file=None
|
StarcoderdataPython
|
9636815
|
# Reading from a file
# Using `with open(...) as` ensures the file is closed correctly at the end of the block
with open("/home/jethro/Dokumenty/prifuk/uvod-do-prg_21/cv06/soubor.txt", encoding="utf-8") as f:
    # obsah = f.read()  # Read the whole file at once
    # print(obsah)
    for radek in f:  # Read the file line by line
        print(f"Řádek: {radek.rstrip()}")
# Writing to a file
# mode="w" opens the file for writing and deletes any existing file of the same name
with open("/home/jethro/Dokumenty/prifuk/uvod-do-prg_21/cv06/soubor_out.txt",
          mode="w", encoding="utf-8") as f:
    f.write("Ahoj")  # Write a string to the file; a newline is not added automatically
|
StarcoderdataPython
|
11374595
|
import consts
def TEST_():
"""
[FUNC] TEST_:
Silly tests
"""
    PI = consts.Consts.PI
    E = consts.Consts.E
    GOLDEN = consts.Consts.GOLDEN
    DEGREE = consts.Consts.DEGREE
    MIN = consts.Consts.MINUTE
    HOUR = consts.Consts.HOUR
    DAY = consts.Consts.DAY
    YR = consts.Consts.YEAR
    return PI, E, GOLDEN, DEGREE, MIN, HOUR, DAY, YR
|
StarcoderdataPython
|
1782049
|
"""Top-level package for barbell2light."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.27.0'
from .castorclient import CastorClient
from .castorexportclient import CastorExportClient
from .utils import Logger
from .utils import current_time_millis
from .utils import current_time_secs
from .utils import elapsed_millis
from .utils import elapsed_secs
from .utils import duration
|
StarcoderdataPython
|
1909372
|
import pysmurf
import numpy as np
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import os
import seaborn as sns
S = pysmurf.SmurfControl(make_logfile=False,
epics_root='test_epics',
cfg_file='/usr/local/controls/Applications/'+\
'smurf/pysmurf/pysmurf/cfg_files/'+\
'experiment_fp28_smurfsrv04.cfg')
datafile = S.take_stream_data(5)
t, d, m = S.read_stream_data(datafile)
ivch = np.array([16,32,64,165,171,179,197,203,213,222,256,389,395,398,415,421,427,447])
d = d[m[2][ivch]]
# d = d[m[np.where(m!=-1)]]
#print(np.shape(d))
pca = PCA(svd_solver='full')
pca.fit(d.T)
d2 = pca.transform(d.T).T
print(np.shape(d2))
fig, ax = plt.subplots(3,4, figsize=(12,7), sharex=True)
for i in np.arange(12):
y = i // 4
x = i % 4
ax[y,x].plot(d2[i])
ax[y,x].set_title('Mode {}'.format(i))
dirname, filename = os.path.split(datafile)
timestamp = filename.split('.')[0]
plt.savefig(os.path.join(dirname, '{}_modes.png'.format(timestamp)),
bbox_inches='tight')
plt.show()
cm = plt.get_cmap('viridis')
for i, ch in enumerate(ivch):
plt.figure()
    plt.plot(d[i], 'k')  # d was re-indexed above to the ivch channels, so row i corresponds to ch
plt.title(ch)
plt.savefig(os.path.join(dirname, '{}_ch{:03}.png'.format(timestamp, ch)),
bbox_inches='tight')
plt.close()
|
StarcoderdataPython
|
1922219
|
import flask
from flask import abort
from flask import current_app, g
from flask import request
frontend = flask.Blueprint("lnt", __name__, template_folder="ui/templates/",
static_folder="ui/static")
def _make_db_session(db_name):
# Initialize the database parameters on the app globals object.
g.db_name = db_name or "default"
g.db_info = current_app.old_config.databases.get(g.db_name)
if g.db_info is None:
abort(404, "Unknown database.")
request.db = current_app.instance.get_database(g.db_name)
request.session = request.db.make_session()
def db_route(rule, **options):
"""
LNT specific route for endpoints which always refer to some database
object.
This decorator handles adding the routes for both the default and explicit
database, as well as initializing the global database information objects.
"""
def decorator(f):
def wrap(db_name=None, **args):
_make_db_session(db_name)
try:
return f(**args)
finally:
request.session.close()
frontend.add_url_rule(rule, f.__name__, wrap, **options)
frontend.add_url_rule("/db_<db_name>" + rule,
f.__name__, wrap, **options)
return wrap
return decorator
def v4_route(rule, **options):
"""
LNT V4 specific route for endpoints which always refer to some testsuite
object.
"""
def decorator(f):
def wrap(testsuite_name, db_name=None, **args):
g.testsuite_name = testsuite_name
_make_db_session(db_name)
try:
return f(**args)
finally:
request.session.close()
frontend.add_url_rule("/v4/<testsuite_name>" + rule,
f.__name__, wrap, **options)
frontend.add_url_rule("/db_<db_name>/v4/<testsuite_name>" + rule,
f.__name__, wrap, **options)
return wrap
return decorator
def in_db(func):
"""Extract the database information off the request and attach to
particular test suite and database. Used by the REST api."""
def wrap(*args, **kwargs):
db = kwargs.pop('db')
ts = kwargs.pop('ts')
g.testsuite_name = ts
_make_db_session(db)
try:
return func(*args, **kwargs)
finally:
request.session.close()
return wrap
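# Hedged usage sketch (not part of the original module): how endpoints might be
# registered with the decorators above; the route paths and handlers are
# illustrative. Calling this function would add both rules to the blueprint.
def _example_routes():
    @db_route("/status")
    def status():
        return "ok"
    @v4_route("/machines")
    def machines():
        return g.testsuite_name
    return status, machines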
|
StarcoderdataPython
|
9627499
|
import json
import os
import sys
json_path = sys.argv[1]
with open(json_path, 'r') as f:
tag = json.load(f)['Tags'][0]
print(tag['Value'])
|
StarcoderdataPython
|
20031
|
import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
list_group = html.Div(
[
make_subheading("ListGroup", "list_group"),
dbc.ListGroup(
[
dbc.ListGroupItem("No color applied"),
dbc.ListGroupItem("The primary item", color="primary"),
dbc.ListGroupItem("A secondary item", color="secondary"),
dbc.ListGroupItem("A successful item", color="success"),
dbc.ListGroupItem("A warning item", color="warning"),
dbc.ListGroupItem("A dangerous item", color="danger"),
dbc.ListGroupItem("An informative item", color="info"),
dbc.ListGroupItem("A light item", color="light"),
dbc.ListGroupItem("A dark item", color="dark"),
dbc.ListGroupItem("An action item", action=True),
dbc.ListGroupItem("An active item", active=True),
dbc.ListGroupItem(
[
html.H5("Item 4 heading"),
html.P("Item 4 text"),
]
),
]
),
],
className="mb-4",
)
|
StarcoderdataPython
|
11251010
|
import ssl
import socket
import requests
import OpenSSL
from datetime import date
#Input: string with filename to file with websites
#Return: nothing, create files of information
def create_certificate_file(filename):
f = open(filename, 'r')
#Creating lits for storing data
hsts_list = []
data = []
dates = []
ssl_errors = []
req_errors = []
other_errors = []
print("\nNew file: " + filename)
print('Fetching pem-certificates...\n')
#Check every website in the file
for website in f:
#Get url
website = 'www.' + str(website.strip().lower())
temp = []
try:
#ssl.PROTOCOL_SSLv23
#Selects SSL version 2 or 3 as the channel encryption protocol.
#This is a setting to use with servers for maximum compatibility with the other end of an SSL connection,
#but it may cause the specific ciphers chosen for the encryption to be of fairly low quality.
#ssl.PROTOCOL_SSLv3
#Selects SSL version 3 as the channel encryption protocol.
#For clients, this is the maximally compatible SSL variant.
#ssl.PROTOCOL_TLSv1
#Selects TLS version 1 as the channel encryption protocol.
#This is the most modern version, and probably the best choice for maximum protection, if both sides can speak it.
#Fetch certificate
socket.setdefaulttimeout(10)
certificate = ssl.get_server_certificate((socket.gethostbyname(website), 443), ssl_version = ssl.PROTOCOL_SSLv23)
#Create certificate-files
pem = OpenSSL.crypto.FILETYPE_PEM
cert = OpenSSL.crypto.load_certificate(pem, certificate)
cert_file = open('certificates\\'+filename[9:-4]+'-'+website+'.txt', 'w')
#Website
cert_file.write(website + '\n')
#HSTS or NOT
req = requests.get('https://' + website)
if 'strict-transport-security' in req.headers:
cert_file.write("HSTS" + "\n")
temp.append(website + ":" + "HSTS" + "\n")
else:
cert_file.write("NO HSTS" + "\n")
temp.append(website + ":" + "NO HSTS" + "\n")
#Signature algorithm
cert_file.write(cert.get_signature_algorithm().decode("utf-8") + '\n')
temp.append(website + ":" + cert.get_signature_algorithm().decode("utf-8") + '\n')
#Issuer organisation
cert_file.write(cert.get_issuer().O + '\n')
temp.append(website + ":" + cert.get_issuer().O + '\n')
#Public key type and bits, types: 6 = RSA, 116 = DSA, 408 = EC
key_type = cert.get_pubkey().type()
if key_type == 6:
cert_file.write('RSA ' + str(cert.get_pubkey().bits()) + '\n')
temp.append(website + ":" + 'RSA ' + str(cert.get_pubkey().bits()) + '\n')
elif key_type == 116:
cert_file.write('DSA ' + str(cert.get_pubkey().bits()) + '\n')
temp.append(website + ":" + 'DSA ' + str(cert.get_pubkey().bits()) + '\n')
elif key_type == 408:
cert_file.write('EC ' + str(cert.get_pubkey().bits()) + '\n')
temp.append(website + ":" + 'EC ' + str(cert.get_pubkey().bits()) + '\n')
#Close certificate file
cert_file.close()
#Store certificate data
data.append(temp)
#Store the certificate dates
dates.append([])
dates[-1].append(website)
#Certificate startdate
start = cert.get_notBefore().decode("utf-8")
y = int(start[:4])
m = int(start[4:6])
d = int(start[6:8])
dates[-1].append(date(y,m,d))
#Certificate enddate
end = cert.get_notAfter().decode("utf-8")
y = int(end[:4])
m = int(end[4:6])
d = int(end[6:8])
dates[-1].append(date(y,m,d))
except ssl.SSLError as e:
ssl_errors.append(website)
print('\nssl:')
print(website + ' failed.')
except requests.exceptions.SSLError as e:
req_errors.append(website)
print('\nreq:')
print(website + ' failed.')
#ConnectionRefusedError, ConnectionResetError or TimeoutError etc
except Exception as e:
other_errors.append(website + ":" + str(type(e)))
print('\n' + website + ' failed.')
f.close()
failures = len(ssl_errors) + len(req_errors) + len(other_errors)
#Print summary
print('\nTotal number of SSL Errors: ' + str(len(ssl_errors)) + '.')
print('Total number of Request Errors: ' + str(len(req_errors)) + '.')
print('Total number of other errors: ' + str(len(other_errors)) + '.')
print(str(len(data)) + ' successful certificates, ' + str(failures) + ' websites failed.')
print('Done fetching and saving certificates.')
#Write certificate data to file
create_data_file(filename, data)
#Write errors to files
create_error_file(filename, ssl_errors, "SSL")
create_error_file(filename, req_errors, "REQ")
create_error_file(filename, other_errors, "OTHER")
#Write dates to file
create_date_file(filename, dates)
#Write stats to file
create_stat_file(filename, len(ssl_errors), len(req_errors), len(other_errors))
#Input: filename, list of errors and string with type of error
#Return: nothing, create file with information about the errors
def create_error_file(filename, errors, error_type):
#Open file
error_file = open('errors\\' + filename[9:-4] + '-' + error_type + '-errors.txt', 'w')
#Write information about the errors to file
error_file.write('Total number of ' + error_type + ' errors:' + str(len(errors)) + '\n')
error_file.write('\n')
#Add each website with errors to file
for domain in errors:
error_file.write(domain + '\n')
#Close the file
error_file.close()
print('\n' + filename[9:-4] + '-' + error_type + '-errors.txt are ready.')
#Input: string with filename and list with data
#Return: nothing, create file with information about the data
def create_data_file(filename, data):
#Open file
data_file = open('data\\' + filename[9:-4] + '-data.txt', 'w')
#Write all data about websites to file
for i in range(len(data[0])):
for j in range(len(data)):
data_file.write(data[j][i])
data_file.write('\n')
#Close the file
data_file.close()
print('\n' + filename[9:-4] + '-data.txt are ready.')
#Input: string with filename and list with dates
#Return: nothing, create file with information about the dates
def create_date_file(filename, dates):
#Open file
date_file = open('dates\\' + filename[9:-4] + '-dates.txt', 'w')
#Sort the file
dates = list(reversed(sort_tuples(dates, 2)))
#Write every date to file
for d in dates:
date_file.write(d[0] + ':' + str(d[1]) + ':' + str(d[2]) + '\n')
#Close the file
date_file.close()
print('\n' + filename[9:-4] + '-dates.txt are ready.')
#Input: list of tuples of data
#Return: list of sorted tubles with data
def sort_tuples(data_list, pos):
#Sort list depending on second pos value
for i in range(len(data_list)):
for j in range(1,len(data_list)):
#Greatest element first in list
if data_list[j-1][pos] < data_list[j][pos]:
temp = data_list[j-1]
data_list[j-1] = data_list[j]
data_list[j] = temp
#Return the sorted list
return data_list
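#Hedged example (illustrative data): sort_tuples orders tuples by the value at
#position pos, largest first.
def _example_sort_tuples():
    data = [('a', 1), ('b', 3), ('c', 2)]
    return sort_tuples(data, 1) # -> [('b', 3), ('c', 2), ('a', 1)]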
#Input: string with filename and number of different errors
#Return: nothing, create file with information with statistics
def create_stat_file(filename, a, b, c):
#Open file
f = open('data\\' + filename[9:-4] + '-data.txt', 'r')
#Create lists to store data
data = [[]]
stat = []
#Read all the data and add to list
for line in f:
if line == '\n':
data.append([])
else:
data[-1].append(line.strip('\n').split(":")[1])
#Go through all categories
for category in data:
data_set = set(category)
data_list = []
#Count number of distinct elements and add to list
for element in data_set:
data_list.append((element, category.count(element)))
#Add information to list and add a dummy point
        stat.extend(sort_tuples(data_list, 1))
stat.append((0,0))
#Open stat file
f = open('stat\\' + filename[9:-4] + '-stat.txt', 'w')
#Write information about the errors to the file
f.write("SSL errors:" + str(a) + "\n")
f.write("REQ errors:" + str(b) + "\n")
f.write("Other errors:" + str(c) + "\n")
f.write("Total number of errors:" + str(a+b+c) + "\n\n")
    #Go through every data-tuple in the list
for tup in stat:
#Write the information to the file
if tup == (0,0):
f.write('\n')
else:
f.write(tup[0] + ':' + str(tup[1]) + '\n')
#Close the file
f.close()
print('\n' + filename[9:-4] + '-stat.txt are ready.')
|
StarcoderdataPython
|
9670565
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
pyu40 PyQt5 tutorial
In this example, we reimplement an
event handler.
author: <NAME>
website: py40.com
last edited: January 2015
"""
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QWidget, QApplication
class Example(QWidget):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
self.setGeometry(300, 300, 250, 150)
self.setWindowTitle('Event handler')
self.show()
    def keyPressEvent(self, e):  # Reimplement this base-class method: pressing Esc (or E) closes the window
if e.key() == Qt.Key_Escape:
self.close()
elif e.key() == Qt.Key_E:
self.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
sys.exit(app.exec_())
|
StarcoderdataPython
|
112368
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from . import repository_pb2 as repository__pb2
class RepositoryStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Healthz = channel.unary_unary(
'/api.Repository/Healthz',
request_serializer=repository__pb2.HealthCheckRequest.SerializeToString,
response_deserializer=repository__pb2.HealthCheckResponse.FromString,
)
self.Config = channel.unary_unary(
'/api.Repository/Config',
request_serializer=repository__pb2.ConfigRequest.SerializeToString,
response_deserializer=repository__pb2.ConfigResponse.FromString,
)
self.ListModels = channel.unary_unary(
'/api.Repository/ListModels',
request_serializer=repository__pb2.ListModelsRequest.SerializeToString,
response_deserializer=repository__pb2.ListModelsResponse.FromString,
)
self.CreateModel = channel.unary_unary(
'/api.Repository/CreateModel',
request_serializer=repository__pb2.CreateModelRequest.SerializeToString,
response_deserializer=repository__pb2.CreateModelResponse.FromString,
)
self.GetModel = channel.unary_unary(
'/api.Repository/GetModel',
request_serializer=repository__pb2.GetModelRequest.SerializeToString,
response_deserializer=repository__pb2.GetModelResponse.FromString,
)
self.UpdateModel = channel.unary_unary(
'/api.Repository/UpdateModel',
request_serializer=repository__pb2.UpdateModelRequest.SerializeToString,
response_deserializer=repository__pb2.UpdateModelResponse.FromString,
)
self.ListHyperparameters = channel.unary_unary(
'/api.Repository/ListHyperparameters',
request_serializer=repository__pb2.ListHyperparametersRequest.SerializeToString,
response_deserializer=repository__pb2.ListHyperparametersResponse.FromString,
)
self.CreateHyperparameters = channel.unary_unary(
'/api.Repository/CreateHyperparameters',
request_serializer=repository__pb2.CreateHyperparametersRequest.SerializeToString,
response_deserializer=repository__pb2.CreateHyperparametersResponse.FromString,
)
self.GetHyperparameters = channel.unary_unary(
'/api.Repository/GetHyperparameters',
request_serializer=repository__pb2.GetHyperparametersRequest.SerializeToString,
response_deserializer=repository__pb2.GetHyperparametersResponse.FromString,
)
self.UpdateHyperparameters = channel.unary_unary(
'/api.Repository/UpdateHyperparameters',
request_serializer=repository__pb2.UpdateHyperparametersRequest.SerializeToString,
response_deserializer=repository__pb2.UpdateHyperparametersResponse.FromString,
)
self.ListCheckpoints = channel.unary_unary(
'/api.Repository/ListCheckpoints',
request_serializer=repository__pb2.ListCheckpointsRequest.SerializeToString,
response_deserializer=repository__pb2.ListCheckpointsResponse.FromString,
)
self.CreateCheckpoint = channel.unary_unary(
'/api.Repository/CreateCheckpoint',
request_serializer=repository__pb2.CreateCheckpointRequest.SerializeToString,
response_deserializer=repository__pb2.CreateCheckpointResponse.FromString,
)
self.GetCheckpoint = channel.unary_unary(
'/api.Repository/GetCheckpoint',
request_serializer=repository__pb2.GetCheckpointRequest.SerializeToString,
response_deserializer=repository__pb2.GetCheckpointResponse.FromString,
)
class RepositoryServicer(object):
# missing associated documentation comment in .proto file
pass
def Healthz(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Config(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListModels(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateModel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetModel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateModel(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListHyperparameters(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateHyperparameters(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetHyperparameters(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateHyperparameters(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListCheckpoints(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def CreateCheckpoint(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetCheckpoint(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RepositoryServicer_to_server(servicer, server):
rpc_method_handlers = {
'Healthz': grpc.unary_unary_rpc_method_handler(
servicer.Healthz,
request_deserializer=repository__pb2.HealthCheckRequest.FromString,
response_serializer=repository__pb2.HealthCheckResponse.SerializeToString,
),
'Config': grpc.unary_unary_rpc_method_handler(
servicer.Config,
request_deserializer=repository__pb2.ConfigRequest.FromString,
response_serializer=repository__pb2.ConfigResponse.SerializeToString,
),
'ListModels': grpc.unary_unary_rpc_method_handler(
servicer.ListModels,
request_deserializer=repository__pb2.ListModelsRequest.FromString,
response_serializer=repository__pb2.ListModelsResponse.SerializeToString,
),
'CreateModel': grpc.unary_unary_rpc_method_handler(
servicer.CreateModel,
request_deserializer=repository__pb2.CreateModelRequest.FromString,
response_serializer=repository__pb2.CreateModelResponse.SerializeToString,
),
'GetModel': grpc.unary_unary_rpc_method_handler(
servicer.GetModel,
request_deserializer=repository__pb2.GetModelRequest.FromString,
response_serializer=repository__pb2.GetModelResponse.SerializeToString,
),
'UpdateModel': grpc.unary_unary_rpc_method_handler(
servicer.UpdateModel,
request_deserializer=repository__pb2.UpdateModelRequest.FromString,
response_serializer=repository__pb2.UpdateModelResponse.SerializeToString,
),
'ListHyperparameters': grpc.unary_unary_rpc_method_handler(
servicer.ListHyperparameters,
request_deserializer=repository__pb2.ListHyperparametersRequest.FromString,
response_serializer=repository__pb2.ListHyperparametersResponse.SerializeToString,
),
'CreateHyperparameters': grpc.unary_unary_rpc_method_handler(
servicer.CreateHyperparameters,
request_deserializer=repository__pb2.CreateHyperparametersRequest.FromString,
response_serializer=repository__pb2.CreateHyperparametersResponse.SerializeToString,
),
'GetHyperparameters': grpc.unary_unary_rpc_method_handler(
servicer.GetHyperparameters,
request_deserializer=repository__pb2.GetHyperparametersRequest.FromString,
response_serializer=repository__pb2.GetHyperparametersResponse.SerializeToString,
),
'UpdateHyperparameters': grpc.unary_unary_rpc_method_handler(
servicer.UpdateHyperparameters,
request_deserializer=repository__pb2.UpdateHyperparametersRequest.FromString,
response_serializer=repository__pb2.UpdateHyperparametersResponse.SerializeToString,
),
'ListCheckpoints': grpc.unary_unary_rpc_method_handler(
servicer.ListCheckpoints,
request_deserializer=repository__pb2.ListCheckpointsRequest.FromString,
response_serializer=repository__pb2.ListCheckpointsResponse.SerializeToString,
),
'CreateCheckpoint': grpc.unary_unary_rpc_method_handler(
servicer.CreateCheckpoint,
request_deserializer=repository__pb2.CreateCheckpointRequest.FromString,
response_serializer=repository__pb2.CreateCheckpointResponse.SerializeToString,
),
'GetCheckpoint': grpc.unary_unary_rpc_method_handler(
servicer.GetCheckpoint,
request_deserializer=repository__pb2.GetCheckpointRequest.FromString,
response_serializer=repository__pb2.GetCheckpointResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'api.Repository', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
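# Hedged usage sketch (not generated code): a minimal client-side call, assuming a
# server is listening on the illustrative address below.
def _example_healthz(address='localhost:8080'):
  channel = grpc.insecure_channel(address)
  stub = RepositoryStub(channel)
  return stub.Healthz(repository__pb2.HealthCheckRequest())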
|
StarcoderdataPython
|
8025704
|
"""generate openapi docs."""
from pkg_resources import get_distribution
from honeybee_schema._openapi import get_openapi, get_model_mapper
from honeybee_schema.model import Model
from honeybee_schema.energy.simulation import SimulationParameter
import json
import argparse
parser = argparse.ArgumentParser(description='Generate OpenAPI JSON schemas')
parser.add_argument('--version',
help='Set the version of the new OpenAPI Schema')
args = parser.parse_args()
VERSION = None
if args.version:
VERSION = args.version.replace('v', '')
else:
VERSION = '.'.join(get_distribution('honeybee_schema').version.split('.')[:3])
info = {
"description": "",
"version": VERSION,
"title": "",
"contact": {
"name": "<NAME>",
"email": "<EMAIL>",
"url": "https://github.com/ladybug-tools/honeybee-schema"
},
"x-logo": {
"url": "https://www.ladybug.tools/assets/img/honeybee-large.png",
"altText": "Honeybee logo"
},
"license": {
"name": "BSD",
"url": "https://github.com/ladybug-tools-in2/honeybee-schema/blob/master/LICENSE"
}
}
# generate Model open api schema
print('Generating Model documentation...')
external_docs = {
"description": "OpenAPI Specification with Inheritance",
"url": "./model_inheritance.json"
}
openapi = get_openapi(
[Model],
title='Honeybee Model Schema',
description='This is the documentation for Honeybee model schema.',
version=VERSION, info=info,
external_docs=external_docs)
with open('./docs/model.json', 'w') as out_file:
json.dump(openapi, out_file, indent=2)
# with inheritance
openapi = get_openapi(
[Model],
title='Honeybee Model Schema',
description='This is the documentation for Honeybee model schema.',
version=VERSION, info=info,
inheritance=True,
external_docs=external_docs
)
with open('./docs/model_inheritance.json', 'w') as out_file:
json.dump(openapi, out_file, indent=2)
# add the mapper file
mapper = get_model_mapper(Model)
module_mapper = {k: c.__module__ for k, c in mapper.items()}
with open('./docs/model_mapper.json', 'w') as out_file:
json.dump(module_mapper, out_file, indent=2)
# generate SimulationParameter open api schema
print('Generating Energy Simulation Parameter documentation...')
external_docs = {
"description": "OpenAPI Specification with Inheritance",
"url": "./simulation-parameter_inheritance.json"
}
openapi = get_openapi(
[SimulationParameter],
title='Honeybee Energy Simulation Parameter Schema',
description='This is the documentation for Honeybee energy simulation parameter schema.',
version=VERSION, info=info,
external_docs=external_docs)
with open('./docs/simulation-parameter.json', 'w') as out_file:
json.dump(openapi, out_file, indent=2)
openapi = get_openapi(
[SimulationParameter],
title='Honeybee Energy Simulation Parameter Schema',
description='This is the documentation for Honeybee energy simulation parameter schema.',
version=VERSION, inheritance=True, info=info,
external_docs=external_docs
)
with open('./docs/simulation-parameter_inheritance.json', 'w') as out_file:
json.dump(openapi, out_file, indent=2)
# add the mapper file
mapper = get_model_mapper(SimulationParameter)
module_mapper = {k: c.__module__ for k, c in mapper.items()}
with open('./docs/simulation-parameter_mapper.json', 'w') as out_file:
json.dump(module_mapper, out_file, indent=2)
|
StarcoderdataPython
|
88768
|
import os
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
VERBOSE = True
USE_CUDA = True
def model_summary(model):
for idx, m in enumerate(model.modules()):
print(idx, '->', m)
def save_checkpoint(state, loss, prefix, ckptpath):
filename_late = os.path.join(ckptpath, "%s_%.5f.tar" % (prefix, loss))
torch.save(state, filename_late)
def adjust_learning_rate(initial, optimizer, epoch, factor=0.1):
lr = max(initial * (factor ** (epoch // 2)), 0.0001)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def set_learning_rate(lr, optimizer):
for param_group in optimizer.param_groups:
param_group['lr'] = lr
# https://github.com/pytorch/pytorch/issues/2830
def optimizer_cuda(optimizer):
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
class AdaptiveLR(object):
def __init__(self, opt, initial_lr, num_iterations=1000):
self._lr = initial_lr
self.opt = opt
self.losses = []
self.window = num_iterations
self.min_lr = 0.0001
self.factor = 0.5
def update(self, loss):
losses = self.losses
while len(losses) > self.window:
losses.pop(0)
losses.append(loss)
if len(losses) < self.window:
return
avg_old = np.mean(losses[:self.window//2])
avg_new = np.mean(losses[self.window//2:])
if avg_new < avg_old:
return
self.lr = max(self.lr * self.factor, self.min_lr)
self.losses = [] # restart loss count
@property
def lr(self):
return self._lr
@lr.setter
def lr(self, val):
if VERBOSE:
print("resetting LR: %s -> %s" % (self._lr, val))
set_learning_rate(val, self.opt)
self._lr = val
def shuffle(data, labels):
s = np.arange(data.shape[0])
np.random.shuffle(s)
return data[s], labels[s]
class CosineSimilarityRegressionLoss(nn.Module):
def __init__(self):
super(CosineSimilarityRegressionLoss, self).__init__()
def forward(self, vec1, vec2, y):
mse = nn.MSELoss()
y_hat = F.cosine_similarity(vec1, vec2)
return mse(y_hat, y)
class CosineSimilarityLossWithL2Regularization(nn.Module):
def __init__(self, cos_sim_margin=0.1, l2_margin=0.1, alpha=0.1):
super(CosineSimilarityLossWithL2Regularization, self).__init__()
self.cos_sim_margin = cos_sim_margin
self.l2_margin = l2_margin
self.alpha = alpha
def forward(self, vec1, vec2, y):
assert vec1.size(0) == vec2.size(0)
ones = Variable(torch.ones(vec1.size(0), 1))
if USE_CUDA:
ones = ones.cuda()
# l2_1 = torch.clamp(torch.abs(ones - vec1.norm(p=2, dim=1)), max=1.0)
# l2_2 = torch.clamp(torch.abs(ones - vec2.norm(p=2, dim=1)), max=1.0)
# l2_1 = l2_1.mean()
# l2_2 = l2_2.mean()
l2_1 = F.l1_loss(ones, vec1.norm(p=2, dim=1))
l2_2 = F.l1_loss(ones, vec2.norm(p=2, dim=1))
loss = F.cosine_embedding_loss(vec1, vec2, y)
return loss + self.alpha * (l2_1 + l2_2)
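# Hedged usage sketch for AdaptiveLR (illustrative model, optimizer and losses):
# feed update() the running loss once per iteration; the learning rate is halved
# when the average loss stops improving over the window.
def _example_adaptive_lr():
    model = nn.Linear(4, 1)
    opt = torch.optim.SGD(model.parameters(), lr=0.1)
    sched = AdaptiveLR(opt, initial_lr=0.1, num_iterations=10)
    for step in range(50):
        loss = 1.0 / (step + 1)  # stand-in for a real training loss
        sched.update(loss)
    return sched.lr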
|
StarcoderdataPython
|
9605258
|
import unittest
from os.path import join
from w3lib.encoding import html_to_unicode
from scrapy.utils.gz import gunzip, gzip_magic_number
from scrapy.http import Response
from tests import tests_datadir
SAMPLEDIR = join(tests_datadir, 'compressed')
class GunzipTest(unittest.TestCase):
def test_gunzip_basic(self):
with open(join(SAMPLEDIR, 'feed-sample1.xml.gz'), 'rb') as f:
r1 = Response("http://www.example.com", body=f.read())
self.assertTrue(gzip_magic_number(r1))
r2 = Response("http://www.example.com", body=gunzip(r1.body))
self.assertFalse(gzip_magic_number(r2))
self.assertEqual(len(r2.body), 9950)
def test_gunzip_truncated(self):
with open(join(SAMPLEDIR, 'truncated-crc-error.gz'), 'rb') as f:
text = gunzip(f.read())
assert text.endswith(b'</html')
def test_gunzip_no_gzip_file_raises(self):
with open(join(SAMPLEDIR, 'feed-sample1.xml'), 'rb') as f:
self.assertRaises(IOError, gunzip, f.read())
def test_gunzip_truncated_short(self):
with open(join(SAMPLEDIR, 'truncated-crc-error-short.gz'), 'rb') as f:
r1 = Response("http://www.example.com", body=f.read())
self.assertTrue(gzip_magic_number(r1))
r2 = Response("http://www.example.com", body=gunzip(r1.body))
assert r2.body.endswith(b'</html>')
self.assertFalse(gzip_magic_number(r2))
def test_is_gzipped_empty(self):
r1 = Response("http://www.example.com")
self.assertFalse(gzip_magic_number(r1))
def test_gunzip_illegal_eof(self):
with open(join(SAMPLEDIR, 'unexpected-eof.gz'), 'rb') as f:
text = html_to_unicode('charset=cp1252', gunzip(f.read()))[1]
with open(join(SAMPLEDIR, 'unexpected-eof-output.txt'), 'rb') as o:
expected_text = o.read().decode("utf-8")
self.assertEqual(len(text), len(expected_text))
self.assertEqual(text, expected_text)
|
StarcoderdataPython
|
6474535
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import typing
Schema = typing.Mapping[str, typing.Any]
|
StarcoderdataPython
|
4981964
|
# SPDX-FileCopyrightText: 2020 Splunk Inc.
#
# SPDX-License-Identifier: Apache-2.0
from future import standard_library
standard_library.install_aliases()
from builtins import object
class RESTURIS(object):
"""
Simple module to wrap REST endpoints into a consistent set of methods
"""
URIS = {
"APP": "/servicesNS/{u}/{a}/apps",
"APP_TEMPLATE": "/servicesNS/{u}/{a}/apps/apptemplates",
"APP_LOCAL": "/servicesNS/{u}/{a}/apps/local/",
"APP_INSTALL": "/servicesNS/{u}/{a}/apps/appinstall",
"AUTOMATIC_LOOKUP": "/servicesNS/{u}/{a}/data/props/lookups",
"AUTHENTICATION": "/services/authentication/users",
"CALCUALTED_FIELD": "/servicesNS/{u}/{a}/data/props/calcfields",
"CAPABILITIES": "/services/authorization/capabilities/",
"CHANGEPASSWORD": "/servicesNS/{u}/{a}/authentication/changepassword/",
"CONFIG": "/servicesNS/{u}/{a}/configs/{config}/",
"CLUSTER_CONFIG": "/servicesNS/{u}/{a}/cluster/config",
"CLUSTER_MASTER": "/servicesNS/{u}/{a}/cluster/master",
"CLUSTER_SEARCHHEAD": "/servicesNS/{u}/{a}/cluster/searchhead",
"CLUSTER_SLAVE": "/servicesNS/{u}/{a}/cluster/slave",
"DATAMODEL_REPORT": "/services/datamodel/pivot/{dm}",
"DATAMODEL_ACC": "/services/datamodel/model/",
"DATAMODEL": "/servicesNS/{u}/{a}/datamodel/model/",
"DATAMODEL_ACCELERATION": "/services/datamodel/acceleration",
"DATAMODEL_DOWNLOAD": "/servicesNS/{u}/{a}/data/models/{dm}/download",
"DATAMODEL_PIVOT": "/servicesNS/{u}/{a}/datamodel/pivot/",
"DEPLOYMENT_CLIENT_CONFIG": ("/servicesNS/{u}/{a}/deployment/client/config"),
"DEPLOYMENT_SERVER_CLASSES": (
"/servicesNS/{u}/{a}/deployment/server/serverclasses"
),
"DEPLOYMENT_SERVER_CONFIG": ("/servicesNS/{u}/{a}/deployment/server/config"),
"DEPLOYMENT_SERVER_CLIENTS": ("/servicesNS/{u}/{a}/deployment/server/clients"),
"DEPLOYMENT_SERVER_APPLICATION": (
"/servicesNS/{u}/{a}/deployment/server/applications"
),
"EVENTTYPE": "/servicesNS/{u}/{a}/saved/eventtypes",
"FIRED_ALERT": "/servicesNS/{u}/{a}/alerts/fired_alerts",
"FIELD": "/servicesNS/{u}/{a}/search/fields",
"FIELD_ALIAS": "/servicesNS/{u}/{a}/data/props/fieldaliases",
"FIELD_EXTRACTION": "/servicesNS/{u}/{a}/data/props/extractions",
"FVTAG": "/servicesNS/{u}/{a}/saved/fvtags",
"HTTPAUTH_TOKEN": "/servicesNS/{u}/{a}/authentication/httpauth-tokens",
"INDEX": "/servicesNS/{u}/{a}/data/indexes/",
"INPUT_MONITOR": "/servicesNS/{u}/{a}/data/inputs/monitor",
"INPUT_ONESHOT": "/servicesNS/{u}/{a}/data/inputs/oneshot",
"INPUT_SCRIPT": "/servicesNS/{u}/{a}/data/inputs/script",
"INPUT_TCP_COOKED": "/servicesNS/{u}/{a}/data/inputs/tcp/cooked",
"INPUT_TCP_RAW": "/servicesNS/{u}/{a}/data/inputs/tcp/raw",
"INPUT_UDP": "/servicesNS/{u}/{a}/data/inputs/udp",
"INPUT_EVENTLOG": ("/servicesNS/{u}/{a}/data/inputs/win-event-log-collections"),
"INPUT_REGMON": "/servicesNS/{u}/{a}/data/inputs/WinRegMon",
"INPUT_PERFMON": "/servicesNS/{u}/{a}/data/inputs/win-perfmon",
"INPUT_HOSTMON": "/servicesNS/{u}/{a}/data/inputs/WinHostMon",
"INPUT_NETMON": "/servicesNS/{u}/{a}/data/inputs/WinNetMon",
"INPUT_ADMON": "/servicesNS/{u}/{a}/data/inputs/ad",
"INPUT_PRINTMON": "/servicesNS/{u}/{a}/data/inputs/WinPrintMon",
"JOB": "/servicesNS/{u}/{a}/search/jobs",
"LDAP": "/services/authentication/providers/LDAP/",
"LOOKUP": "/servicesNS/{u}/{a}/data/props/lookups/",
"LOOKUP_TRANSFORM": "/servicesNS/{u}/{a}/data/transforms/lookups/",
"LOOKUP_TABLE_FILES": "/servicesNS/{u}/{a}/data/lookup-table-files",
"LOGIN": "/services/auth/login",
"MACRO": "/servicesNS/{u}/{a}/data/macros",
"MESSAGES": "/servicesNS/{u}/{a}/messages",
"NAVIGATION": "/servicesNS/{u}/{a}/data/ui/nav",
"NTAG": "/servicesNS/{u}/{a}/saved/ntags",
"OPEN_IN_PIVOT_GENERATE": "/services/datamodel/generate",
"PROPERTIES": "/servicesNS/{u}/{a}/properties",
"ROLE": "/services/authorization/roles/",
"REFRESH": "/debug/refresh",
"SAVED_SEARCH": "/servicesNS/{u}/{a}/saved/searches",
"SCHEDULED_VIEW": "/servicesNS/{u}/{a}/scheduled/views",
"SEARCH_COMMANDS": "/servicesNS/{u}/{a}/search/commands",
"SOURCETYPE": "/servicesNS/{u}/{a}/saved/sourcetypes",
"SERVER_CONTROL_RESTART": "/services/server/control/restart/",
"SERVER_SETTINGS": "/services/{u}/server-settings/settings",
"ACCESS_CONTROL_XML": "/services/data/ui/manager/accesscontrols",
"TAG": "/servicesNS/{u}/{a}/search/tags",
"TIME": "/servicesNS/{u}/{a}/data/ui/times",
"TRANSFORMS_EXTRACTION": ("/servicesNS/{u}/{a}/data/transforms/extractions"),
"TRANSFORMS_LOOKUP": "/servicesNS/{u}/{a}/data/transforms/lookups/",
"TRANSPARENT_SUMMARIZATION": "/servicesNS/{u}/{a}/admin/summarization",
"TYPEAHEAD": "/servicesNS/{u}/{a}/search/typeahead/",
"USER": "/servicesNS/{u}/{a}/authentication/users",
"UI_MANAGER": "/servicesNS/{u}/{a}/data/ui/manager",
"UI_PREFS": "/servicesNS/{u}/{a}/admin/ui-prefs",
"USER_PREFS": "/servicesNS/{u}/{a}/admin/user-prefs",
"VIEW": "/servicesNS/{u}/{a}/data/ui/views",
"VIEWSTATES": "/servicesNS/{u}/{a}/data/ui/viewstates",
"VIX_INDEXES": "/servicesNS/{u}/{a}/data/vix-indexes",
"VIX_PROVIDERS": "/servicesNS/{u}/{a}/data/vix-providers",
"WORKFLOW_ACTION": "/servicesNS/{u}/{a}/data/ui/workflow-actions",
"RELOAD_ENDPOINT": "/services/configs/conf-{conf}/_reload",
"ROLL_HOT_TO_COLD": "/services/data/indexes/{index}/chill-bucket?bucket_id={bucket_id}",
"INDEXER_S2S_TOKEN": "/services/data/inputs/tcp/splunktcptoken",
"FORWARDER_S2S_TOKEN": "/services/data/outputs/tcp/group",
"SAML": "/services/authentication/providers/SAML",
"JOBS_CREATED_FROM_SAVED_SEARCH": "/servicesNS/{u}/{a}/saved/searches/{name}/history",
"SAML_USER_ROLE_MAP": "/services/{u}/SAML-user-role-map",
"SAML_GROUP": "/services/{u}/SAML-groups",
"SAML_METADATA": "services/{u}/SAML-sp-metadata",
"SAML_AUTH": "/services/{u}/SAML-auth",
"SPLUNK_AUTH": "/services/admin/Splunk-auth/splunk_auth",
}
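# Hedged usage sketch (illustrative user/app values): the URIs above are plain
# str.format templates, filled in before issuing a request.
def _example_format_uri():
    uri = RESTURIS.URIS["SAVED_SEARCH"].format(u="admin", a="search")
    return uri  # "/servicesNS/admin/search/saved/searches"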
|
StarcoderdataPython
|
11280213
|
from selenium import webdriver
chrome = webdriver.Chrome()
chrome.implicitly_wait(5)
chrome.get("https://yandex.ru")
home_tabs = chrome.find_element_by_css_selector("[data-id='market']")
# https://stackoverflow.com/questions/43627340/what-is-the-difference-between-property-and-attribute-in-selenium-webelement
html = home_tabs.get_property("innerHTML")
print(html)
attr = home_tabs.get_attribute("data-bem")
print(attr)
css = home_tabs.value_of_css_property("margin-bottom")
print(css)
chrome.close()
|
StarcoderdataPython
|
383498
|
# hico_det/model/util.py
import torch
import os
import math
import time
import sys
import math
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np
from collections import OrderedDict
device_ids=[0,2]
obj_hoi_index = [(0, 0), (161, 170), (11, 24), (66, 76), (147, 160), (1, 10), (55, 65), (187, 194), (568, 576),
(32, 46), (563, 567), (326, 330), (503, 506), (415, 418), (244, 247), (25, 31), (77, 86), (112, 129),
(130, 146), (175, 186), (97, 107), (314, 325), (236, 239), (596, 600), (343, 348), (209, 214), (577, 584),
(353, 356), (539, 546), (507, 516), (337, 342), (464, 474), (475, 483), (489, 502), (369, 376), (225, 232),
(233, 235), (454, 463), (517, 528), (534, 538), (47, 54), (589, 595), (296, 305), (331, 336), (377, 383),
(484, 488), (253, 257), (215, 224), (199, 208), (439, 445), (398, 407), (258, 264), (274, 283), (357, 363),
(419, 429), (306, 313), (265, 273), (87, 92), (93, 96), (171, 174), (240, 243), (108, 111), (551, 558),
(195, 198), (384, 389), (394, 397), (435, 438),(364, 368), (284, 290), (390, 393), (408, 414), (547, 550),
(450, 453), (430, 434), (248, 252), (291, 295),(585, 588), (446, 449), (529, 533), (349, 352), (559, 562)
]
hico_classes = ['__background__', # always index 0
'airplane', 'apple', 'backpack', 'banana', 'baseball_bat', 'baseball_glove', 'bear', 'bed', 'bench',
'bicycle', 'bird', 'boat', 'book', 'bottle', 'bowl', 'broccoli', 'bus', 'cake', 'car', 'carrot', 'cat',
'cell_phone', 'chair', 'clock', 'couch', 'cow', 'cup', 'dining_table', 'dog', 'donut', 'elephant',
'fire_hydrant', 'fork', 'frisbee', 'giraffe', 'hair_drier', 'handbag', 'horse', 'hot_dog', 'keyboard',
'kite', 'knife', 'laptop', 'microwave', 'motorcycle', 'mouse', 'orange', 'oven', 'parking_meter',
'person', 'pizza', 'potted_plant', 'refrigerator', 'remote', 'sandwich', 'scissors', 'sheep', 'sink',
'skateboard', 'skis', 'snowboard', 'spoon', 'sports_ball', 'stop_sign', 'suitcase', 'surfboard',
'teddy_bear', 'tennis_racket', 'tie', 'toaster', 'toilet', 'toothbrush', 'traffic_light', 'train',
'truck', 'tv', 'umbrella', 'vase', 'wine_glass', 'zebra']
def get_box_index(batch_size):
batch_index_list = [i for i in range(batch_size)]
box_index_data = torch.IntTensor(batch_index_list)
box_index = to_varabile(box_index_data) # ([0,1,2,3....,batch-1])
return box_index
def to_varabile(tensor, is_cuda=True):
if is_cuda:
tensor=tensor.cuda()
return tensor
def process_multi_batch(batch):
"""
:param batch:
:return:
batch[0]:image_tensor
batch[1]:rois_h_tensor
batch[2]:rois_o_tensor
batch[3]:pair_tensor
batch[4]:obj_det_score_tensor
batch[5]:img_path
batch[6]:human_bboxes
batch[7]:obj_bboxes
batch[8]:action_tensor
"""
batch_imgs_arr = batch[0][0].transpose(0, 3, 2, 1)
batch_rois_h_arr = batch[0][1]
batch_rois_o_arr = batch[0][2]
batch_pair_posi_arr = batch[0][3].transpose(0, 3, 2, 1)
batch_obj_det_s_arr = batch[0][4]
batch_img_path = batch[0][5]
batch_human_bboxes = batch[0][6]
batch_obj_bboxes = batch[0][7]
# batch_point_arr = batch[0][8].transpose(0, 3, 2, 1)
# label
batch_action_arr = batch[1]
batch_imgs_tensor = torch.from_numpy(batch_imgs_arr)
batch_imgs_tensor = to_varabile(batch_imgs_tensor.float())
batch_rois_h_arr_tensor = torch.from_numpy(batch_rois_h_arr)
batch_rois_h_arr_tensor = to_varabile(batch_rois_h_arr_tensor.float())
batch_rois_o_arr_tensor = torch.from_numpy(batch_rois_o_arr)
batch_rois_o_arr_tensor = to_varabile(batch_rois_o_arr_tensor.float())
batch_pair_posi_arr_tensor = torch.from_numpy(batch_pair_posi_arr)
batch_pair_posi_arr_tensor = to_varabile(batch_pair_posi_arr_tensor.float())
batch_obj_det_s_arr_tensor = torch.from_numpy(batch_obj_det_s_arr)
batch_obj_det_s_arr_tensor = to_varabile(batch_obj_det_s_arr_tensor.float())
#
# batch_point_arr_tensor = torch.from_numpy(batch_point_arr)
# batch_point_arr_tensor = to_varabile(batch_point_arr_tensor)
batch_action_tensor = torch.from_numpy(batch_action_arr).type(torch.FloatTensor)#.type(torch.FloatTensor)
batch_action_tensor = to_varabile(batch_action_tensor)
return batch_imgs_tensor,batch_rois_h_arr_tensor,batch_rois_o_arr_tensor,batch_pair_posi_arr_tensor,\
batch_obj_det_s_arr_tensor,batch_img_path,batch_human_bboxes,batch_obj_bboxes,\
batch_action_tensor
def process_batch(batch):
"""
:param batch:
:return:
batch[0]:image_tensor
batch[1]:rois_h_tensor
batch[2]:rois_o_tensor
batch[3]:pair_tensor
batch[4]:obj_det_score_tensor
batch[5]:img_path
batch[6]:human_bboxes
batch[7]:obj_bboxes
batch[8]:action_tensor
"""
batch_imgs_arr = batch[0][0].transpose(0, 3, 2, 1)
batch_rois_h_arr = batch[0][1]
batch_rois_o_arr = batch[0][2]
batch_pair_posi_arr = batch[0][3].transpose(0, 3, 2, 1)
batch_obj_det_s_arr = batch[0][4]
batch_img_path = batch[0][5]
batch_human_bboxes = batch[0][6]
batch_obj_bboxes = batch[0][7]
# batch_point_arr = batch[0][8].transpose(0, 3, 2, 1)
# label
batch_action_arr = batch[1]
batch_imgs_tensor = torch.from_numpy(batch_imgs_arr)
batch_imgs_tensor = to_varabile(batch_imgs_tensor.float())
batch_rois_h_arr_tensor = torch.from_numpy(batch_rois_h_arr)
batch_rois_h_arr_tensor = to_varabile(batch_rois_h_arr_tensor.float())
batch_rois_o_arr_tensor = torch.from_numpy(batch_rois_o_arr)
batch_rois_o_arr_tensor = to_varabile(batch_rois_o_arr_tensor.float())
batch_pair_posi_arr_tensor = torch.from_numpy(batch_pair_posi_arr)
batch_pair_posi_arr_tensor = to_varabile(batch_pair_posi_arr_tensor.float())
batch_obj_det_s_arr_tensor = torch.from_numpy(batch_obj_det_s_arr)
batch_obj_det_s_arr_tensor = to_varabile(batch_obj_det_s_arr_tensor.float())
#
# batch_point_arr_tensor = torch.from_numpy(batch_point_arr)
# batch_point_arr_tensor = to_varabile(batch_point_arr_tensor)
batch_action_tensor = torch.from_numpy(batch_action_arr)
batch_action_tensor = to_varabile(batch_action_tensor)
return batch_imgs_tensor,batch_rois_h_arr_tensor,batch_rois_o_arr_tensor,batch_pair_posi_arr_tensor,\
batch_obj_det_s_arr_tensor,batch_img_path,batch_human_bboxes,batch_obj_bboxes,\
batch_action_tensor
class FocalLoss(nn.Module):
    def __init__(self, focusing_param=8, balance_param=0.25):  # focusing_param is the gamma exponent of the focal loss
super(FocalLoss, self).__init__()
self.focusing_param = focusing_param
self.balance_param = balance_param
def forward(self, output, target):
cross_entropy = F.cross_entropy(output, target)
cross_entropy_log = torch.log(cross_entropy)
logpt = - F.cross_entropy(output, target)
pt = torch.exp(logpt)
focal_loss = -((1 - pt) ** self.focusing_param) * logpt
balanced_focal_loss = self.balance_param * focal_loss
return balanced_focal_loss
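# Hedged usage sketch for FocalLoss (illustrative batch size and the 600 HOI
# classes used elsewhere in this file): logits plus integer class targets, as
# expected by F.cross_entropy inside the loss.
def _example_focal_loss():
    criterion = FocalLoss(focusing_param=2, balance_param=0.25)
    logits = torch.randn(4, 600)
    target = torch.randint(0, 600, (4,))
    return criterion(logits, target)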
def accuracy(output, target, topk=(5,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def from_one_hot_to_list(one_hot_tensor):
one_hot_list = one_hot_tensor.clone().cpu().detach().numpy().tolist()
list_ = []
for one_hot in one_hot_list:
list_.append(one_hot.index(1))
return np.array(list_)
def multi_accuracy(output, target,topk=(5,)):
maxk = max(topk)
target=torch.from_numpy(from_one_hot_to_list(target)).cuda()
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
# print('shape1',output.shape)
# print('shape2',target.shape)
class Timer(object):
"""A simple timer."""
def __init__(self):
self.reset()
def tic(self):
# using time.time instead of time.clock because time time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def format_time(seconds):
days = int(seconds / 3600/24)
seconds = seconds - days*3600*24
hours = int(seconds / 3600)
seconds = seconds - hours*3600
minutes = int(seconds / 60)
seconds = seconds - minutes*60
secondsf = int(seconds)
seconds = seconds - secondsf
millis = int(seconds*1000)
f = ''
i = 1
if days > 0:
f += str(days) + 'D'
i += 1
if hours > 0 and i <= 2:
f += str(hours) + 'h'
i += 1
if minutes > 0 and i <= 2:
f += str(minutes) + 'm'
i += 1
if secondsf > 0 and i <= 2:
f += str(secondsf) + 's'
i += 1
if millis > 0 and i <= 2:
f += str(millis) + 'ms'
i += 1
if f == '':
f = '0ms'
return f
def adjust_learning_rate(ori_lr,optimizer, epoch,batch_index,lr_decay_step):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
rate = epoch // lr_decay_step
urate=epoch%lr_decay_step
if epoch>1 and urate==0 and batch_index==1:
lr = ori_lr * (0.1 ** rate)
optimizer.param_groups[0]['lr']=lr
def get_loss_weighted():
with open('./data/hico_data/hico_hoi_count_train.txt','r') as f:
lines=f.readlines()
hoi_class_ins=[]
hoi_num=0
for line in lines:
hoi_class_ins.append(int(line.strip().split(' ')[-1]))
for i in hoi_class_ins:
hoi_num+=i
hoi_class_weight = [hoi_num / float(ins) for ins in hoi_class_ins]
return torch.from_numpy(np.array(hoi_class_weight))
def get_obj_score():
with open('/home/priv-lab1/workspace/zxh/HOI/hico_det/data/hico_data/hico_hoi_count_train.txt') as f:
lines=f.readlines()
action_dict=OrderedDict()
for i,line in enumerate(lines):
if line.strip().split(' ')[1] in action_dict:
action_dict[line.strip().split(' ')[1]]['end']=i+1
else:
action_dict[line.strip().split(' ')[1]] = {'start':i+1}
print(action_dict)
if __name__ == '__main__':
# get_loss_weighted()
get_obj_score()
|
StarcoderdataPython
|
338299
|
# -*- coding: utf-8; -*-
import os.path
import shutil
from ano.commands.base import Command
class Clean(Command):
"""
Remove all intermediate compilation files and directories completely.
In fact `.build' directory is simply removed.
"""
name = 'clean'
help_line = "Remove intermediate compilation files completely"
def run(self, args):
if os.path.isdir(self.e.output_dir):
shutil.rmtree(self.e.output_dir)
|
StarcoderdataPython
|
3303015
|
import unittest
from flow.core.params import SumoParams, EnvParams, InitialConfig, \
NetParams, SumoCarFollowingParams, SumoLaneChangeParams
from flow.core.params import VehicleParams
from flow.controllers.routing_controllers import ContinuousRouter
from flow.controllers.car_following_models import IDMController
from flow.controllers import RLController
from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS
from flow.utils.exceptions import FatalFlowError
from flow.envs import Env, TestEnv
from tests.setup_scripts import ring_road_exp_setup, highway_exp_setup
import os
import numpy as np
import gym.spaces as spaces
os.environ["TEST_FLAG"] = "True"
# colors for vehicles
WHITE = (255, 255, 255)
CYAN = (0, 255, 255)
RED = (255, 0, 0)
YELLOW = (255, 255, 0)
class TestShuffle(unittest.TestCase):
"""
Tests that, at resets, the ordering of vehicles changes while the starting
position values stay the same.
"""
def setUp(self):
# turn on vehicle arrangement shuffle
env_params = EnvParams(
additional_params=ADDITIONAL_ENV_PARAMS)
# place 5 vehicles in the network (we need at least more than 1)
vehicles = VehicleParams()
vehicles.add(
veh_id="test",
acceleration_controller=(IDMController, {}),
routing_controller=(ContinuousRouter, {}),
num_vehicles=5)
initial_config = InitialConfig(x0=5, shuffle=True)
# create the environment and network classes for a ring road
self.env, _ = ring_road_exp_setup(
env_params=env_params,
initial_config=initial_config,
vehicles=vehicles)
def tearDown(self):
# terminate the traci instance
self.env.terminate()
# free data used by the class
self.env = None
def test_shuffle(self):
ids = self.env.k.vehicle.get_ids()
# position of vehicles before reset
before_reset = [self.env.k.vehicle.get_x_by_id(veh_id)
for veh_id in ids]
# reset the environment
self.env.reset()
# position of vehicles after reset
after_reset = [self.env.k.vehicle.get_x_by_id(veh_id)
for veh_id in ids]
self.assertCountEqual(before_reset, after_reset)
class TestEmissionPath(unittest.TestCase):
"""
Tests that the default emission path of an environment is set to None.
If it is not None, then sumo starts accumulating memory.
"""
def setUp(self):
# set sim_params to default
sim_params = SumoParams()
# create the environment and network classes for a ring road
self.env, _ = ring_road_exp_setup(sim_params=sim_params)
def tearDown(self):
# terminate the traci instance
self.env.terminate()
# free data used by the class
self.env = None
def test_emission(self):
self.assertIsNone(self.env.sim_params.emission_path)
class TestApplyingActionsWithSumo(unittest.TestCase):
"""
Tests the apply_acceleration, apply_lane_change, and choose_routes
functions in base.py
"""
def setUp(self):
# create a 2-lane ring road network
additional_net_params = {
"length": 230,
"lanes": 3,
"speed_limit": 30,
"resolution": 40
}
net_params = NetParams(additional_params=additional_net_params)
# turn on starting position shuffle
env_params = EnvParams(
additional_params=ADDITIONAL_ENV_PARAMS)
# place 5 vehicles in the network (we need at least more than 1)
vehicles = VehicleParams()
vehicles.add(
veh_id="test",
acceleration_controller=(IDMController, {}),
routing_controller=(ContinuousRouter, {}),
car_following_params=SumoCarFollowingParams(
accel=1000, decel=1000),
lane_change_params=SumoLaneChangeParams(
lane_change_mode=0),
num_vehicles=5)
# create the environment and network classes for a ring road
self.env, _ = ring_road_exp_setup(
net_params=net_params, env_params=env_params, vehicles=vehicles)
def tearDown(self):
# terminate the traci instance
self.env.terminate()
# free data used by the class
self.env = None
def test_apply_acceleration(self):
"""
Tests that, in the absence of all failsafes, the acceleration requested
from sumo is equal to the acceleration witnessed in between steps. Also
ensures that vehicles can never have velocities below zero given any
acceleration.
"""
ids = self.env.k.vehicle.get_ids()
vel0 = np.array(
[self.env.k.vehicle.get_speed(veh_id) for veh_id in ids])
# apply a certain set of accelerations to the vehicles in the network
accel_step0 = np.array([0, 1, 4, 9, 16])
self.env.k.vehicle.apply_acceleration(veh_ids=ids, acc=accel_step0)
self.env.k.simulation.simulation_step()
self.env.k.vehicle.update(False)
# compare the new velocity of the vehicles to the expected velocity
# given the accelerations
vel1 = np.array([
self.env.k.vehicle.get_speed(veh_id)
for veh_id in ids
])
expected_vel1 = (vel0 + accel_step0 * 0.1).clip(min=0)
np.testing.assert_array_almost_equal(vel1, expected_vel1, 1)
# apply a set of decelerations
accel_step1 = np.array([-16, -9, -4, -1, 0])
self.env.k.vehicle.apply_acceleration(veh_ids=ids, acc=accel_step1)
self.env.k.simulation.simulation_step()
self.env.k.vehicle.update(False)
# this time, some vehicles should be at 0 velocity (NOT less), and some
# are a result of the accelerations that took place
vel2 = np.array([
self.env.k.vehicle.get_speed(veh_id)
for veh_id in ids
])
expected_vel2 = (vel1 + accel_step1 * 0.1).clip(min=0)
np.testing.assert_array_almost_equal(vel2, expected_vel2, 1)
def test_apply_lane_change_errors(self):
"""
Ensures that apply_lane_change raises ValueErrors when it should
"""
self.env.reset()
ids = self.env.k.vehicle.get_ids()
# make sure that running apply_lane_change with invalid direction values
# leads to a ValueError
bad_directions = np.array([-1, 0, 1, 2, 3])
self.assertRaises(
ValueError,
self.env.k.vehicle.apply_lane_change,
veh_ids=ids,
direction=bad_directions)
def test_apply_lane_change_direction(self):
"""
Tests the direction method for apply_lane_change. Ensures that the lane
change action requested from sumo is the same as the lane change that
occurs, and that vehicles do not issue lane changes when there is no
lane in the requested direction.
"""
self.env.reset()
ids = self.env.k.vehicle.get_ids()
lane0 = np.array(
[self.env.k.vehicle.get_lane(veh_id) for veh_id in ids])
max_lanes = self.env.net_params.additional_params['lanes']
# perform lane-changing actions using the direction method
direction0 = np.array([0, 1, 0, 1, -1])
self.env.k.vehicle.apply_lane_change(ids, direction=direction0)
self.env.k.simulation.simulation_step()
self.env.k.vehicle.update(False)
# check that each vehicle changes lanes in the correct direction
# without skipping lanes
lane1 = np.array([
self.env.k.vehicle.get_lane(veh_id)
for veh_id in ids
])
expected_lane1 = (lane0 + np.sign(direction0)).clip(
min=0, max=max_lanes - 1)
np.testing.assert_array_almost_equal(lane1, expected_lane1, 1)
# perform lane-changing actions using the direction method one more
# time to test lane changes to the right
direction1 = np.array([-1, -1, -1, -1, -1])
self.env.k.vehicle.apply_lane_change(ids, direction=direction1)
self.env.k.simulation.simulation_step()
self.env.k.vehicle.update(False)
# check that each vehicle changes lanes in the correct direction
# without skipping lanes
lane2 = np.array([
self.env.k.vehicle.get_lane(veh_id)
for veh_id in ids
])
expected_lane2 = (lane1 + np.sign(direction1)).clip(
min=0, max=max_lanes - 1)
np.testing.assert_array_almost_equal(lane2, expected_lane2, 1)
class TestWarmUpSteps(unittest.TestCase):
"""Ensures that the appropriate number of warmup steps are run when using
flow.core.params.EnvParams.warmup_steps"""
def test_it_works(self):
warmup_step = 5 # some value
# start an environment with a number of simulations per step greater
# than one
env_params = EnvParams(
warmup_steps=warmup_step, additional_params=ADDITIONAL_ENV_PARAMS)
env, _ = ring_road_exp_setup(env_params=env_params)
# time before running a reset
t1 = 0
# perform a reset
env.reset()
# time after a reset
t2 = env.time_counter
# ensure that the difference in time is equal to the number of warmup steps
self.assertEqual(t2 - t1, warmup_step)
class TestSimsPerStep(unittest.TestCase):
"""Ensures that the appropriate number of simultaions are run at any given
steps when using flow.core.params.EnvParams.sims_per_step"""
def test_it_works(self):
sims_per_step = 5 # some value
# start an environment with a number of simulations per step greater
# than one
env_params = EnvParams(
sims_per_step=sims_per_step,
additional_params=ADDITIONAL_ENV_PARAMS)
env, _ = ring_road_exp_setup(env_params=env_params)
env.reset()
# time before running a step
t1 = env.time_counter
# perform a step
env.step(rl_actions=[])
# time after a step
t2 = env.time_counter
# ensure that the difference in time is equal to sims_per_step
self.assertEqual(t2 - t1, sims_per_step)
class TestAbstractMethods(unittest.TestCase):
"""
This series of tests is meant to ensure that the environment abstractions
exist and are in fact abstract, i.e. they will raise errors if not
implemented in a child class.
"""
def setUp(self):
env, network = ring_road_exp_setup()
sim_params = SumoParams() # FIXME: make ambiguous
env_params = EnvParams()
self.env = Env(sim_params=sim_params,
env_params=env_params,
network=network)
def tearDown(self):
self.env.terminate()
self.env = None
def test_get_state(self):
"""Checks that get_state raises an error."""
self.assertRaises(NotImplementedError, self.env.get_state)
def test_compute_reward(self):
"""Checks that compute_reward returns 0."""
self.assertEqual(self.env.compute_reward([]), 0)
def test__apply_rl_actions(self):
self.assertRaises(NotImplementedError, self.env._apply_rl_actions,
rl_actions=None)
class TestVehicleColoring(unittest.TestCase):
def test_all(self):
vehicles = VehicleParams()
vehicles.add("human", num_vehicles=10)
# add an RL vehicle to ensure that its color will be distinct
vehicles.add("rl", acceleration_controller=(RLController, {}),
num_vehicles=1)
_, network = ring_road_exp_setup(vehicles=vehicles)
env = TestEnv(EnvParams(), SumoParams(), network)
env.reset()
# set one vehicle as observed
env.k.vehicle.set_observed("human_0")
# update the colors of all vehicles
env.step(rl_actions=None)
# check that, when rendering is off, the colors don't change (this
# avoids unnecessary API calls)
for veh_id in env.k.vehicle.get_ids():
self.assertEqual(env.k.vehicle.get_color(veh_id), YELLOW)
# a little hack to ensure the colors change
env.sim_params.render = True
# set one vehicle as observed
env.k.vehicle.set_observed("human_0")
# update the colors of all vehicles
env.step(rl_actions=None)
# check the colors of all vehicles
for veh_id in env.k.vehicle.get_ids():
if veh_id in ["human_0"]:
self.assertEqual(env.k.vehicle.get_color(veh_id), CYAN)
elif veh_id == "rl_0":
self.assertEqual(env.k.vehicle.get_color(veh_id), RED)
else:
self.assertEqual(env.k.vehicle.get_color(veh_id), WHITE)
class TestNotEnoughVehicles(unittest.TestCase):
"""Tests that when not enough vehicles spawn an error is raised."""
def test_num_spawned(self):
initial_config = InitialConfig(
spacing="custom",
additional_params={
'start_positions': [('highway_0', 0), ('highway_0', 0)],
'start_lanes': [0, 0]}
)
vehicles = VehicleParams()
vehicles.add('test', num_vehicles=2)
self.assertRaises(FatalFlowError,
highway_exp_setup,
initial_config=initial_config,
vehicles=vehicles)
class BoxEnv(Env):
"""A mock-up class to test clipping for Box."""
def get_state(self):
pass
@property
def action_space(self):
return spaces.Box(low=0, high=1, shape=(3,))
@property
def observation_space(self):
pass
def _apply_rl_actions(self, rl_actions):
pass
class TestClipBoxActions(unittest.TestCase):
"""
This tests that the base environment properly clips box actions per the
action-space specification.
"""
def setUp(self):
env, network = ring_road_exp_setup()
sim_params = SumoParams()
env_params = EnvParams()
self.env = BoxEnv(
sim_params=sim_params,
env_params=env_params,
scenario=network)
def tearDown(self):
self.env.terminate()
self.env = None
def test_clip_box_actions(self):
"""Test whether box actions get properly clipped."""
actions = [0.5, -1, 2]
clipped_actions = [0.5, 0, 1]
_actions = self.env.clip_actions(actions)
self.assertTrue((_actions == clipped_actions).all())
class TupleEnv(Env):
"""A mock-up class to test clipping for Tuple."""
def get_state(self):
pass
@property
def action_space(self):
return spaces.Tuple([
spaces.Box(low=0, high=255, shape=(1,)),
spaces.Box(low=0, high=1, shape=(3,)),
spaces.Discrete(3)])
@property
def observation_space(self):
pass
def _apply_rl_actions(self, rl_actions):
pass
class TestClipTupleActions(unittest.TestCase):
"""
This tests that the base environment properly clips tuple actions based on
the specification in each individual Box object.
"""
def setUp(self):
env, scenario = ring_road_exp_setup()
sim_params = SumoParams()
env_params = EnvParams()
self.env = TupleEnv(
sim_params=sim_params,
env_params=env_params,
scenario=scenario)
def tearDown(self):
self.env.terminate()
self.env = None
def test_clip_tuple_actions(self):
"""Test whether tuple actions get properly clipped."""
actions = [
[-1],
[0.5, -1, 2],
2
]
clipped_actions = [
[0],
[0.5, 0, 1],
2
]
_actions = self.env.clip_actions(actions)
self.assertEqual(_actions[0], clipped_actions[0])
self.assertTrue((_actions[1] == clipped_actions[1]).all())
self.assertEqual(_actions[2], clipped_actions[2])
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
4898602
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ReplicaLifecycleDescription(Model):
"""Describes how the replica will behave.
:param is_singleton_replica_move_allowed_during_upgrade: If set to true,
replicas with a target replica set size of 1 will be permitted to move
during upgrade.
:type is_singleton_replica_move_allowed_during_upgrade: bool
:param restore_replica_location_after_upgrade: If set to true, move/swap
replica to original location after upgrade.
:type restore_replica_location_after_upgrade: bool
"""
_attribute_map = {
'is_singleton_replica_move_allowed_during_upgrade': {'key': 'IsSingletonReplicaMoveAllowedDuringUpgrade', 'type': 'bool'},
'restore_replica_location_after_upgrade': {'key': 'RestoreReplicaLocationAfterUpgrade', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(ReplicaLifecycleDescription, self).__init__(**kwargs)
self.is_singleton_replica_move_allowed_during_upgrade = kwargs.get('is_singleton_replica_move_allowed_during_upgrade', None)
self.restore_replica_location_after_upgrade = kwargs.get('restore_replica_location_after_upgrade', None)
|
StarcoderdataPython
|
1962403
|
# this is an AWS inventory for RDS
# you must supply an AWS IAM keypair with Read to RDS across specified regions
# output is whitespace delimited to STDOUT (except Tags, which are comma
# separated)
# feel free to change the fields to any variable of the DBInstance boto class
# but please sanitize changes to these fields
import argparse
from pprint import pprint
import boto.rds
import unicodedata
access_key = ''
secret_key = ''
def get_rds_instances(region,fields):
rds_conn = boto.rds.connect_to_region(region, aws_access_key_id=access_key, aws_secret_access_key=secret_key)
dbinstances = rds_conn.get_all_dbinstances()
for instance in dbinstances:
#pprint(i.__dict__)
# trying to use variable of class DBInstance as string to sanitize in loop fails
#for field in fields:
#instance."field" = str(instance."field")
# print(instance.field)
# stuck sanitizing each field
instance.id = str(instance.id)
instance.instance_class = str(instance.instance_class)
instance.engine = str(instance.engine)
instance.multi_az = str(instance.multi_az)
instance.availability_zone = str(instance.availability_zone)
instance.allocated_storage = str(instance.allocated_storage)
print instance.id, instance.instance_class, instance.engine, instance.multi_az, instance.availability_zone, instance.allocated_storage
def main():
# which regions to pull from
# note AZs for each region are included
regions = [ 'us-east-1','us-west-1','us-west-2','eu-west-1','sa-east-1',
'ap-southeast-1','ap-southeast-2','ap-northeast-1', 'eu-central-1', 'ap-northeast-2' ]
# which fields to print from DBInstance class attributes
fields = [ 'id', 'instance_class', 'engine', 'multi_az', 'availability_zone', 'allocated_storage' ]
parser = argparse.ArgumentParser()
parser.add_argument('access_key', help='Access Key');
parser.add_argument('secret_key', help='Secret Key');
args = parser.parse_args()
global access_key
global secret_key
access_key = args.access_key
secret_key = args.secret_key
print("DB ID, Instance class, Engine, MultiAZ, AZ, Storage")
for region in regions: get_rds_instances(region,fields)
if __name__ =='__main__':main()
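# Example invocation (assumption: the script name and credentials are placeholders):
#   python rds_inventory.py AKIAEXAMPLE wJalrEXAMPLEKEY
# prints a header line followed by one whitespace-delimited row per RDS instance
# found across the regions listed in main().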
|
StarcoderdataPython
|
11225237
|
<reponame>vijaykumawat256/Prompt-Summarization
def dominator(arr):
|
StarcoderdataPython
|
3417107
|
<filename>ecommerceApplicationServer/ecommerce/migrations/0003_product_productstatus.py
# Generated by Django 4.0.2 on 2022-02-16 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ecommerce', '0002_product'),
]
operations = [
migrations.AddField(
model_name='product',
name='productStatus',
field=models.BooleanField(default=None),
preserve_default=False,
),
]
|
StarcoderdataPython
|
338013
|
<gh_stars>1-10
config={}
with open('etc/apt/config', 'r') as config_file:
exec(config_file.read(), config)
import os
import subprocess
import sys
import re
import io
import shutil
try:
import urllib2
except ImportError:
from urllib import request as urllib2
import bz2, lzma, gzip
try:
import cPickle as pickle
except ImportError:
import pickle
import tarfile
try:
from hashlib import md5
except ImportError:
from md5 import md5
desc="""
apt-get {update | upgrade | install pkg ... | remove pkg... | purge pkg... | -h}
"""
available_package_list_file = 'var/cache/apt/package_available.pkl'
installed_package_list_file = 'var/cache/apt/package_installed.pkl'
link_package_list_file = 'var/cache/apt/package_links.pkl'
package_folder = 'var/cache/apt/archives'
number_re = re.compile('([0-9]+)')
class DebianStringVersion:
def __init__(self, text):
self.text = text
def __cmp__(self, other):
i = 0
for i in range(0, min(len(self.text), len(other.text))):
if self.text[i] != other.text[i]:
if self.text[i] == '~':
return -1
elif other.text[i] == '~':
return 1
elif self.text[i] in ['-', '.', ':']:
if other.text[i] in ['-', '.', ':']:
return ord(self.text[i]) - ord(other.text[i])
else:
return -1
else:
if other.text[i] in ['-', '.', ':']:
return 1
else:
return ord(self.text[i]) - ord(other.text[i])
if len(self.text) > len(other.text):
if self.text[i] == '~':
return -1
else:
return 1
else:
if other.text[i] == '~':
return 1
else:
return -1
def __eq__(self, other):
return self.text == other.text
def __ne__(self, other):
return self.text != other.text
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
assert(DebianStringVersion('52~m1-1~') < DebianStringVersion('52.1-3'))
def to_version(version_string):
result = []
epoch, __, version = version_string.partition(':')
if not version:
version = epoch
epoch = 0
else:
epoch = int(epoch)
upstream_version, __, debian_version = version.rpartition('-')
if not upstream_version:
upstream_version = debian_version
debian_version = ''
result.append(epoch)
result += [int(text) if text.isdigit() else DebianStringVersion(text) for text in number_re.split(upstream_version)]
result += [int(text) if text.isdigit() else DebianStringVersion(text) for text in number_re.split(debian_version)]
return result
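# Worked example (not part of the original script): to_version splits the epoch, upstream
# version and Debian revision into alternating text/number chunks so that Debian ordering
# rules apply when the resulting lists are compared, e.g.
#   to_version('1:2.30.2-1ubuntu1') > to_version('1:2.30.1-7')   # True
#   to_version('52~m1-1~') < to_version('52.1-3')                # True, same strings as the assert above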
class Package:
KEYS = [
'Depends',
'Pre-Depends',
'MD5sum',
'Size',
'Installed-Size',
'Version',
'Filename'
]
def __init__(self, package_name, package_arch):
self.package_name = package_name
self.package_arch = package_arch
self.Depends = ''
setattr(self, 'Pre-Depends', '')
self.filename = ''
def compatible(self, version_check):
if version_check:
assert(version_check[0] == '(')
assert(version_check[-1] == ')')
op, version = version_check[1:-1].split(' ')
version = to_version(version)
my_version = to_version(self.Version)
if op == '<<':
return my_version < version
if op == '>>':
return my_version > version
if op == '=':
return my_version == version
if op == '<=':
return my_version <= version
if op == '>=':
return my_version >= version
raise Exception('unknown op: %s' % op)
else:
return True
def __str__(self):
return '%s:%s [%s]' % (self.package_name, self.package_arch, self.Version)
def generate_ld_conf():
with open('etc/ld.so.conf', 'w') as ld_so_conf:
try:
confs = os.listdir('etc/ld.so.conf.d')
except OSError:
pass
else:
for conf in confs:
with open('etc/ld.so.conf.d/%s'%conf) as conf_file:
ld_so_conf.write(conf_file.read())
ld_so_conf.write('\n')
def usage():
print(desc)
def download(url):
def chunk_report(bytes_so_far, chunk_size, total_size):
if total_size:
percent = float(bytes_so_far) / total_size
percent = round(percent*100, 2)
sys.stdout.write('\r[%0.2f%%] %s...'%(percent, url))
sys.stdout.flush()
else:
data_so_far = float(bytes_so_far)
unit = 'B'
if data_so_far > 1024*5:
data_so_far = data_so_far / 1024
unit = 'kB'
if data_so_far > 1024*5:
data_so_far = data_so_far / 1024
unit = 'MB'
sys.stdout.write('\r[%0.2f%s] %s...'%(data_so_far, unit, url))
sys.stdout.flush()
chunk_size = 8192
data = bytes()
response = urllib2.urlopen(url)
try:
total_size = response.info()['Content-length'].strip()
total_size = int(total_size)
except Exception as e:
print(e)
total_size = 0
bytes_so_far = 0
chunk_report(bytes_so_far, chunk_size, total_size)
while(1):
try:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
data += chunk
chunk_report(bytes_so_far, chunk_size, total_size)
except Exception as e:
print(e)
return None
print('')
return data
def run(*command):
p = subprocess.Popen(command, stdin = subprocess.PIPE, stdout = subprocess.PIPE, stderr = subprocess.PIPE)
out, err = p.communicate()
if p.returncode != 0:
if not isinstance(err, str):
err = err.decode('utf-8')
raise Exception(err)
return out
def load_database():
try:
with open(available_package_list_file, 'rb') as package_file:
available_packages = pickle.load(package_file)
except Exception:
raise Exception('No package database stored\n\n%s' % desc)
try:
with open(installed_package_list_file, 'rb') as package_file:
installed_packages = pickle.load(package_file)
except Exception as e:
installed_packages = {}
for architectures,_,_,_ in config['MIRRORS']:
for arch in architectures:
installed_packages[arch] = {}
try:
with open(link_package_list_file, 'rb') as link_file:
installed_links = pickle.load(link_file)
except Exception:
installed_links = []
return available_packages, installed_packages, installed_links
def save_database(installed_packages, installed_links):
with open(installed_package_list_file, 'wb') as package_file:
pickle.dump(installed_packages, package_file)
with open(link_package_list_file, 'wb') as link_file:
pickle.dump(installed_links, link_file)
def ensure_directories_exist():
directory = os.path.split(available_package_list_file)[0]
try: os.makedirs(directory)
except OSError: pass
directory = os.path.split(installed_package_list_file)[0]
try: os.makedirs(directory)
except OSError: pass
try: os.makedirs(package_folder)
except OSError: pass
try: os.makedirs('etc')
except OSError: pass
def filter_packages_to_download(package_list):
result = []
for package in package_list:
filename = os.path.split(package.Filename)[1]
filepath = os.path.join(package_folder, filename)
if os.path.isfile(filepath):
with open(filepath, 'rb') as f:
m = md5()
m.update(f.read())
if m.hexdigest().lower() == package.MD5sum.lower():
continue
result.append(package)
return result
def do_download(package_list):
for package in package_list:
filename = os.path.split(package.Filename)[1]
filepath = os.path.join(package_folder, filename)
with open(filepath, 'wb') as f:
f.write(download(package.Url + '/' + package.Filename))
def do_delete_packages(package_list):
for package in package_list:
filename = os.path.split(package.Filename)[1]
filepath = os.path.join(package_folder, filename)
os.remove(filepath)
def do_install(package_list, directory_links):
links = []
def read_file(bytes, offset):
if offset & 1 == 1:
offset+=1
filename = bytes[offset:offset+16].decode('latin1').strip()
filesize = int(bytes[offset+48:offset+58].decode('latin1').strip())
return offset+60+filesize, filename, bytes[offset+60:offset+60+filesize]
for package in package_list[::-1]:
filename = os.path.split(package.Filename)[1]
filepath = os.path.join(package_folder, filename)
print(filename)
package.installed_files = []
package.installed_dirs = []
with open(filepath, 'rb') as archive:
bytes = archive.read()
i = 8
i, filename, data = read_file(bytes, i)
assert(filename == 'debian-binary')
while i < len(bytes)-1:
i, filename, data = read_file(bytes, i)
if filename.startswith('data.tar'):
tar = tarfile.open(fileobj=io.BytesIO(data))
for info in tar.getmembers():
if info.isdir():
package.installed_dirs.append(info.name)
try:
os.mkdir(info.name)
except OSError:
pass
elif info.isfile():
filename = info.name
if filename[0] == '/':
filename = '.'+filename
with open(filename, 'wb') as file:
file.write(tar.extractfile(info).read())
package.installed_files.append(filename)
elif info.islnk() or info.issym():
if sys.platform == 'win32':
links.append((package, info.name, info.linkname))
else:
if info.linkname[0] == '/':
info.linkname = os.path.relpath('.', os.path.split(info.name)[0]) + info.linkname
try:
os.symlink(info.linkname, info.name)
except FileExistsError:
os.unlink(info.name)
os.symlink(info.linkname, info.name)
else:
print (info.name)
while links:
old_links = links
links = []
for package, link_path, link_target in old_links:
if link_target[0] == '/':
relative_link_target = '.' + link_target
else:
relative_link_target = os.path.join(os.path.split(link_path)[0], link_target)
if os.path.isdir(relative_link_target):
package.installed_dirs.append(link_path)
directory_links.append((link_path, relative_link_target))
elif os.path.isfile(relative_link_target):
package.installed_files.append(link_path)
with open(relative_link_target, 'rb') as source:
with open(link_path, 'wb') as dest:
dest.write(source.read())
else:
links.append((package, link_path, link_target))
if len(old_links) == len(links):
print("Can't create links:\n " + '\n '.join('[%s] %s -> %s' % l for l in links))
break
for link_path, link_target in directory_links:
try:
shutil.rmtree(link_path)
except FileNotFoundError:
pass
shutil.copytree(link_target, link_path)
def do_uninstall(package_list):
pass
def info():
available_packages, installed_packages, links = load_database()
for package_arch, packages in installed_packages.items():
for package_name, package in packages.items():
print(package_name, ':', package_arch)
def update():
ensure_directories_exist()
packages = {}
mirrors = config['MIRRORS']
package_count = 0
package = None
for architectures, mirror, dists, repositories in mirrors:
for architecture in architectures:
packages[architecture] = {}
for dist in dists:
for repository in repositories:
for ext, decompress in (('xz', lzma.decompress), ('bz2', bz2.decompress), ('gz', gzip.decompress)):
url = '%s/dists/%s/%s/binary-%s/Packages.%s'%(mirror, dist, repository, architecture, ext)
try:
package_compressed = download(url)
except Exception:
pass
else:
package_data = decompress(package_compressed)
break
else:
raise Exception ('could not download %s %s [%s]' %(dist, repository, architecture))
try:
package_data = package_data.decode('latin1')
except Exception:
pass
for line in package_data.split('\n'):
if line:
split = line.find(':')
key = line[:split]
value = line[split + 1:].strip()
if key == 'Package':
package = Package(value, architecture)
package.Url = mirror
package_count += 1
try:
packages[architecture][value].append(package)
except KeyError:
packages[architecture][value] = [package]
else:
if key in Package.KEYS:
setattr(package, key, value)
if key == 'Provides':
provides = value.split(',')
for p in provides:
p = p.strip()
try:
packages[architecture][p].append(package)
except:
packages[architecture][p] = [package]
with open(available_package_list_file, 'wb') as package_file:
pickle.dump(packages, package_file)
print('Information on %d packages' % package_count)
def upgrade():
return
def install(package_list):
available_packages, installed_packages, installed_links = load_database()
default_architecture = config['DEFAULT_ARCHITECTURE']
package_list = [(p, None) for p in package_list]
packages_to_check = []
seen = set([])
while package_list:
package_name_arch, package_version = package_list.pop(0)
arch_start = package_name_arch.find(':')
if arch_start != -1:
arch = package_name_arch[arch_start+1:]
package_name = package_name_arch[:arch_start]
else:
arch = default_architecture
package_name = package_name_arch
if (package_name, arch) in seen:
continue
seen.add((package_name, arch))
try:
available_list = sorted(available_packages[arch][package_name], reverse=True, key=lambda x: to_version(x.Version))
for package in available_list:
if package.compatible(package_version):
break
else:
raise Exception('package %s[%s] version %s has no installation candidate.\n Available:\n %s'
% (package_name, arch, package_version, '\n '.join(['%s (%s)'%(p.package_name, p.Version) for p in available_list])))
packages_to_check.append(package)
dependencies = package.Depends.split(',') if package.Depends else []
dependencies += getattr(package, 'Pre-Depends', '').split(',') if getattr(package, 'Pre-Depends', '') else []
for depend in dependencies:
depend_choice = depend.split('|')
for depend in depend_choice:
depend = depend.strip().split(' ', 1)
depend_name = depend[0]
depend_version = depend[1] if len(depend) == 2 else None
if (depend_name, arch) in seen:
for p in packages_to_check:
if p.package_name == depend_name and p.package_arch == arch:
if not p.compatible(depend_version):
raise Exception('%s[%s] requires %s[%s] version %s, but version %s is to be installed'
% (package_name, arch, depend_name, arch, depend_version, p.Version))
break
else:
for pname, pver in package_list:
if pname == depend_name:
break
else:
depend = depend_choice[0].strip().split(' ', 1)
depend_name = depend[0]
depend_version = depend[1] if len(depend) == 2 else None
package_list.insert(0, ('%s:%s'%(depend_name, arch), depend_version))
except KeyError:
raise Exception('Unable to locate package %s[%s]'%(package_name, arch))
new_packages = []
updated_packages = []
for package in packages_to_check:
try:
installed_package = installed_packages[package.package_arch][package.package_name]
except KeyError:
new_packages.append(package)
else:
if to_version(package.Version) > to_version(installed_package.Version):
updated_packages.append((package, installed_package))
if new_packages:
print('The following NEW packages will be installed:\n %s\n' % ', '.join(['%s[%s]'%(p.package_name,p.package_arch) for p in new_packages]))
if updated_packages:
print('The following packages will be upgraded:\n %s\n' % ', '.join(['%s[%s]'%(p.package_name,p.package_arch) for p,__ in updated_packages]))
print('%d upgraded, %d newly installed, and %d to remove' % (len(updated_packages), len(new_packages), 0))
download_packages = filter_packages_to_download(new_packages + [p for p, __ in updated_packages])
download_size = float(sum([int(p.Size) for p in download_packages]))
total_package_size = float(sum([int(p.Size) for p in new_packages]))
total_package_size += float(sum([int(p.Size) for p,_ in updated_packages]))
install_size = float(sum([int(getattr(p, 'Installed-Size')) for p in new_packages]))
install_size += float(sum([int(getattr(p, 'Installed-Size')) for p,__ in updated_packages]))
install_size -= float(sum([int(getattr(p, 'Installed-Size')) for __,p in updated_packages]))
unit_dl = 'B'
if download_size > 1024*5:
download_size /= 1024
unit_dl = 'kB'
if download_size > 1024*5:
download_size /= 1024
unit_dl = 'MB'
unit = 'B'
if total_package_size > 1024*5:
total_package_size /= 1024
unit = 'kB'
if total_package_size > 1024*5:
total_package_size /= 1024
unit = 'MB'
print('Need to get %0.2f %s/%0.2f %s of archives.' % (download_size, unit_dl, total_package_size, unit))
unit = 'kB'
disk = 'used'
if install_size < 0:
disk = 'freed'
install_size = -install_size
if install_size > 1024*5:
install_size /= 1024
unit = 'MB'
print('After this operation, %0.2f %s of additional disk space will be %s.' % (install_size, unit, disk))
print('Do you want to continue? [Y/n]',)
do_download(download_packages)
do_uninstall([p for __,p in updated_packages])
do_install([p for p,__ in updated_packages], installed_links)
do_install(new_packages, installed_links)
generate_ld_conf()
save_database(installed_packages, installed_links)
print('Do you want to delete downloaded packages? [Y/n]',)
#do_delete_packages(new_packages + [p for p, __ in updated_packages])
def uninstall(package_list):
return 0
if __name__ == '__main__':
command = sys.argv[1]
packages = sys.argv[2:]
try:
if command == '-h':
usage()
elif command == 'update':
if packages:
raise Exception(desc)
update()
elif command == 'upgrade':
if packages:
raise Exception(desc)
upgrade()
elif command == 'info':
if packages:
raise Exception(desc)
info()
elif command == 'install':
if not packages:
raise Exception(desc)
install(packages)
elif command == 'remove':
if not packages:
raise Exception(desc)
uninstall(packages)
elif command == 'purge':
if not packages:
raise Exception(desc)
uninstall(packages)
else:
raise Exception('unknown command: %s\n\n%s' % (command, desc))
except Exception as e:
print(e.__class__, e)
exit(1)
else:
exit(0)
|
StarcoderdataPython
|
8129251
|
#!/usr/bin/env python
#
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
# Copyright (c) 2018 <NAME>
#
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
# Phylanx K-Means algorithm example in Python. Iteratively clusters 250
# randomly generated points into 3 clusters for the specified number of
# iterations.
#
# Code adapted from: http://flothesof.github.io/k-means-numpy.html
# Original source code is BSD-licensed
#
# \param iterations Number of iterations
# \returns the cluster centroids
import argparse
import csv
import numpy as np
def initialize_centroids(points, k):
centroids = points.copy()
np.random.shuffle(centroids)
return centroids[:k]
def closest_centroid(points, centroids):
distances = np.sqrt(((points - centroids[:, np.newaxis]) ** 2).sum(axis=2))
return np.argmin(distances, axis=0)
def move_centroids(points, closest, centroids):
return np.array([points[closest == k].mean(axis=0) for k in range(
centroids.shape[0])])
def kmeans(points, k, iterations):
centroids = initialize_centroids(points, k)
for i in range(iterations):
centroids = move_centroids(points, closest_centroid(points, centroids),
centroids)
return centroids
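# Minimal usage sketch (not part of the original example; mirrors what main() does below):
#   pts = generate_random()                      # (250, 2) array of sample points
#   centers = kmeans(pts, k=3, iterations=10)    # -> ndarray of shape (3, 2)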
def generate_random():
return np.vstack((
(np.random.randn(150, 2) * 0.75 + np.array([1, 0])),
(np.random.randn(50, 2) * 0.25 + np.array([-0.5, 0.5])),
(np.random.randn(50, 2) * 0.5 + np.array([-0.5, -0.5]))
))
def csv_records(path):
with argparse.FileType('r')(path) as csv_file:
data = [d for d in csv.reader(csv_file, delimiter=',')]
return np.asarray(data, dtype=np.float_)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--centroids', type=int, default=3)
parser.add_argument('--iterations', type=int, default=2)
parser.add_argument('--csv-file', dest='points', type=csv_records,
default=generate_random())
parser.add_argument('--dry-run', type=bool, nargs='?', const=True,
default=False)
return parser.parse_args()
def main():
args = parse_args()
if args.dry_run:
print('kmeans', args.points.shape, args.centroids, args.iterations)
else:
print('Cluster centroids are:\n',
kmeans(args.points, args.centroids, args.iterations))
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
6436047
|
<reponame>SOFIE-project/SMAUG-Marketplace
from flask.app import Flask
from project import create_app
from project.db_utils import load_server_config
from dotenv import load_dotenv
from os import environ
load_dotenv('.env')
_app: Flask = None
# Singleton for Flask app
def get_app() -> Flask:
global _app
if _app is None:
_app = create_app(load_server_config())
return _app
if __name__ == "__main__":
app = get_app()
app.run(host=environ.get('HOST', '0.0.0.0'),
port=environ.get('PORT', '61234'))
|
StarcoderdataPython
|
146539
|
<filename>central/aplicacao/leitura.py
from aplicacao.models import Leitura, Sensor, SensorGrandeza
def novaLeitura(_idRede, _grandeza, _valor):
try:
sensor = Sensor.objects.get(idRede=_idRede)
sg = SensorGrandeza.objects.get(sensor=sensor, grandeza_id=_grandeza)
l = Leitura(valor=_valor, grandeza=sg.grandeza,
ambiente=sensor.ambiente, sensor=sensor)
l.save()
except Sensor.DoesNotExist:
print('ID de rede: ' + str(_idRede) + ' nao encontrado')
return False
except SensorGrandeza.DoesNotExist:
print('A grandeza ' + str(_grandeza) +
' nao esta cadastrada no sensor ' + str(sensor))
return False
except Exception as e:
print(e)
return False
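# Usage sketch (assumption: the ids/value below are illustrative):
#   novaLeitura(_idRede=3, _grandeza=1, _valor=23.5)
# saves a Leitura for the sensor with network id 3 and returns None on success,
# or False (with a message printed) when the sensor or grandeza is not registered.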
|
StarcoderdataPython
|
11363795
|
<gh_stars>10-100
import scrapy
class BooksSpider(scrapy.Spider):
name = 'bookLinks'
start_urls = ['http://books.toscrape.com']
images_data = {}
def parse(self, response):
# follow links to author pages
for img in response.css('a::attr(href)'):
yield response.follow(img, self.parse_images)
def parse_images(self, response):
print ("URL: " + response.request.url)
def extract_with_css(query):
return response.css(query).extract()
yield {
'URL': response.request.url,
'image_link': extract_with_css('img::attr(src)')
}
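# Usage sketch (assumption: the file name is a placeholder):
#   scrapy runspider book_links_spider.py -o links.json
# crawls every link on books.toscrape.com and yields each page URL plus its image sources.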
|
StarcoderdataPython
|
149028
|
from collections import deque
from threading import Condition
from typing import BinaryIO, Deque, Optional
from pytils.mixins import DaemonHandler
from ._base import IOReceiver, IOSender
__all__ = [
'QueuedReceiver',
'QueuedSender',
]
_DEFAULT_MAX_QUEUE_SIZE = 4096
class QueuedSender(DaemonHandler, IOSender):
def __init__(self, dst: BinaryIO, max_queue_size: int = _DEFAULT_MAX_QUEUE_SIZE):
super().__init__(dst)
self.is_closed = False
self._cv = Condition()
self._queue = deque(maxlen=max_queue_size) # type: Optional[Deque[bytes]]
def send(self, msg: bytes):
with self._cv:
self._queue.append(msg)
self._cv.notify()
def is_active(self) -> bool:
return not self.is_closed
def handle_one(self):
with self._cv:
self._cv.wait_for(lambda: self.is_closed or self._queue)
self._cv.notify()
if self._queue:
super().send(self._queue.popleft())
def close(self):
self.is_closed = True
with self._cv:
self._cv.notify_all()
class QueuedReceiver(DaemonHandler, IOReceiver):
def __init__(self, src: BinaryIO, max_queue_size: int = _DEFAULT_MAX_QUEUE_SIZE):
super().__init__(src)
self.is_closed = False
self._cv = Condition()
self._queue = deque(maxlen=max_queue_size) # type: Optional[Deque[bytes]]
def receive(self) -> Optional[bytes]:
with self._cv:
self._cv.wait_for(lambda: self.is_closed or self._queue)
self._cv.notify()
return self._queue.popleft() if self._queue else None
def is_active(self) -> bool:
return not self.is_closed
def handle_one(self):
msg = super().receive()
with self._cv:
if msg is not None:
self._queue.append(msg)
else:
self.is_closed = True
self._cv.notify()
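# Usage sketch (assumption, not part of the original module): a daemon loop that repeatedly
# calls handle_one() is expected to be provided by pytils.mixins.DaemonHandler, e.g.
#   sender = QueuedSender(dst=stream)   # stream: any writable BinaryIO
#   sender.send(b'message')             # enqueued; flushed by the daemon via handle_one()
#   sender.close()                      # wakes the loop and marks the sender inactive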
|
StarcoderdataPython
|
1821138
|
# Auto-generated at 2021-09-27T17:12:33.436703+08:00
# from: Justice Lobby Service (1.33.0)
# Copyright (c) 2018 - 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import Model
from ..models.model_localization import ModelLocalization
class ModelNotificationTemplateResponse(Model):
"""Model notification template response
Properties:
template_localizations: (templateLocalizations) REQUIRED List[ModelLocalization]
template_slug: (templateSlug) REQUIRED str
"""
# region fields
template_localizations: List[ModelLocalization] # REQUIRED
template_slug: str # REQUIRED
# endregion fields
# region with_x methods
def with_template_localizations(self, value: List[ModelLocalization]) -> ModelNotificationTemplateResponse:
self.template_localizations = value
return self
def with_template_slug(self, value: str) -> ModelNotificationTemplateResponse:
self.template_slug = value
return self
# endregion with_x methods
# region to methods
def to_dict(self, include_empty: bool = False) -> dict:
result = {}
if hasattr(self, "template_localizations") and self.template_localizations:
result["templateLocalizations"] = [i0.to_dict(include_empty=include_empty) for i0 in self.template_localizations]
elif include_empty:
result["templateLocalizations"] = []
if hasattr(self, "template_slug") and self.template_slug:
result["templateSlug"] = str(self.template_slug)
elif include_empty:
result["templateSlug"] = str()
return result
# endregion to methods
# region static methods
@classmethod
def create(
cls,
template_localizations: List[ModelLocalization],
template_slug: str,
) -> ModelNotificationTemplateResponse:
instance = cls()
instance.template_localizations = template_localizations
instance.template_slug = template_slug
return instance
@classmethod
def create_from_dict(cls, dict_: dict, include_empty: bool = False) -> ModelNotificationTemplateResponse:
instance = cls()
if not dict_:
return instance
if "templateLocalizations" in dict_ and dict_["templateLocalizations"] is not None:
instance.template_localizations = [ModelLocalization.create_from_dict(i0, include_empty=include_empty) for i0 in dict_["templateLocalizations"]]
elif include_empty:
instance.template_localizations = []
if "templateSlug" in dict_ and dict_["templateSlug"] is not None:
instance.template_slug = str(dict_["templateSlug"])
elif include_empty:
instance.template_slug = str()
return instance
@staticmethod
def get_field_info() -> Dict[str, str]:
return {
"templateLocalizations": "template_localizations",
"templateSlug": "template_slug",
}
# endregion static methods
|
StarcoderdataPython
|
11309364
|
import os
import json
from itertools import chain
from functools import reduce
from pathlib import Path
from tqdm import tqdm
import h5py as h5
import fire
import numpy as np
from PIL import Image, ImageDraw, ImageFont, features, ImageOps
# from fontTools.ttLib import TTFont
from logger import Logger
from datasets import thai_decompose as thai
CODE_RANGE = {
'kor': [[0x0021, 0x007E], [0x3131, 0x3163], [0xAC00, 0xD7A3]],
'thai': [[0x0E01, 0x0E3A], [0x0E3F, 0x0E5B]]
}
def get_code_points(language):
codes = set()
code_range = CODE_RANGE[language]
for rangemin, rangemax in code_range:
for codepoint in range(rangemin, rangemax+1):
codes.add(chr(codepoint))
return codes
def dump_to_hdf5(dump_path, font_name, images, chars, compression=None):
with h5.File(dump_path, 'w') as f:
dset = f.create_group('dataset')
dset.attrs['font_name'] = font_name
N = len(images)
print(N, len(chars))
print(np.stack(images).shape)
dset.create_dataset('images', (N, 128, 128), np.uint8, compression=compression,
data=np.stack(images))
data = np.array(chars)
dset.create_dataset('chars', data.shape, np.int64, compression=compression,
data=np.array(chars))
class UserFontProcessor(object):
def __init__(self, language, resize_method="bilinear", font_size_factor=2, sample_size=128):
self.logger = Logger.get(file_path='preparedata.log', level='error')
self.language = language
self.targetcodes = get_code_points(self.language)
if resize_method == 'bilinear':
self.resize_method = Image.BILINEAR
else:
raise ValueError('Invalid resize method: {}'.format(resize_method))
self.sample_size = sample_size
self.font_size = self.sample_size * font_size_factor
def ord(self, char):
if self.language == 'kor':
return ord(char)
else:
raise ValueError(self.language)
def get_fontsize(self, npimg):
w, h = npimg.shape
wsum = npimg.sum(0)
hsum = npimg.sum(1)
npimg = 255 - npimg
# # Binary Thresholding.
threshold = 255 * 2
if not npimg.sum():
self.logger.warning('glyph image is empty (no black pixels)')
return False
wmin = np.arange(w)[wsum>threshold].min()
wmax = np.arange(w)[wsum>threshold].max()
hmin = np.arange(h)[hsum>threshold].min()
hmax = np.arange(h)[hsum>threshold].max()
return max(wmax-wmin, hmax-hmin)
def center_align(self, npimg, fontmaxsize, size=128, margin=0):
# remove boundaries
boundaries = 5
npimg = npimg[boundaries:-(boundaries-1),boundaries:-(boundaries-1)]
w, h = npimg.shape
wsum = npimg.sum(0)
hsum = npimg.sum(1)
npimg = 255 - npimg
# # Binary Thresholding.
threshold = 255 * 2
if not npimg.sum():
self.logger.warning('glyph image is empty (no black pixels)')
return False
wmin = np.arange(w)[wsum>threshold].min()
wmax = np.arange(w)[wsum>threshold].max()
hmin = np.arange(h)[hsum>threshold].min()
hmax = np.arange(h)[hsum>threshold].max()
npimg = 255 - npimg[hmin:hmax+1, wmin:wmax+1]
canvas_size = int(fontmaxsize*(1+margin))
roi_w = wmax-wmin
roi_h = hmax-hmin
left_margin = (canvas_size - roi_w)//2
right_margin = canvas_size - roi_w - left_margin
top_margin = (canvas_size - roi_h)//2
bottom_margin = canvas_size - roi_h - top_margin
npimg = np.pad(npimg, ((top_margin, bottom_margin), (left_margin, right_margin)),
'constant', constant_values=255)
img = Image.fromarray(npimg).resize((size, size), resample=self.resize_method)
return img
def render_center_no_offset(self, char, font, fontmaxsize, size=128, margin=0):
char = self.fix_char_order_if_thai(char)
size_x, size_y = font.getsize(char)
offset_x, offset_y = font.getoffset(char)
roi_w = size_x-offset_x
roi_h = size_y-offset_y
img = Image.new('L', (roi_w, roi_h), 255)
draw = ImageDraw.Draw(img)
draw.text((-offset_x, -offset_y), char, font=font)
if img.size[0] == 0 or img.size[1] == 0:
self.logger.warning(
'{}, "{}" ({}) is empty (size=0)'.format(font, char, self.ord(char))
)
return False
npimg = 255 - np.array(img)
if not npimg.sum():
self.logger.warning(
'{}, "{}" ({}) is empty (no black)'.format(font, char, self.ord(char))
)
return False
wmin = npimg.sum(0).nonzero()[0].min()
wmax = npimg.sum(0).nonzero()[0].max()
hmin = npimg.sum(1).nonzero()[0].min()
hmax = npimg.sum(1).nonzero()[0].max()
npimg = 255 - npimg[hmin:hmax+1, wmin:wmax+1]
canvas_size = int(fontmaxsize*(1+margin))
left_margin = (canvas_size - roi_w)//2
right_margin = canvas_size - roi_w - left_margin
top_margin = (canvas_size - roi_h)//2
bottom_margin = canvas_size - roi_h - top_margin
npimg = np.pad(npimg, ((top_margin, bottom_margin), (left_margin, right_margin)),
'constant', constant_values=255)
img = Image.fromarray(npimg).resize((size, size), resample=self.resize_method)
return img
def dump_fonts(self, fonts, dump_dir, compression=None):
self.logger.info('# Font candidates: {}'.format(len(fonts)))
dump_dir = Path(dump_dir)
dump_dir.mkdir(parents=True, exist_ok=True)
assert dump_dir.is_dir()
n_fonts = len(fonts)
for i, targetfontpath in enumerate(fonts):
targetfontname = os.path.basename(targetfontpath) # w/ ext
font_name = os.path.basename(targetfontpath) # w/o ext
hdf5_name = "{}.hdf5".format(font_name)
dump_path = dump_dir / hdf5_name
if dump_path.exists():
continue
targetfontpath = Path(targetfontpath)
targetfonts = [str(fname) for fname in targetfontpath.glob("*")]
images = []
chars = []
# get max font size
fontmaxsize = 0
for f in targetfonts:
img = Image.open(f)
npimg = np.array(ImageOps.grayscale(img))
maxsize = self.get_fontsize(npimg)
fontmaxsize = max(fontmaxsize, maxsize)
for f in targetfonts:
img = Image.open(f)
npimg = np.array(ImageOps.grayscale(img))
# img = Image.fromarray(npimg)
img = self.center_align(npimg, fontmaxsize, size=128, margin=0.4)
# img = Image.fromarray(npimg).resize((128, 128), resample=self.resize_method)
if not img:
continue
char = os.path.basename(f)
char = char.split('.')[0][3:]
char = int(char, 16)
images.append(img)
chars.append(char)
dump_to_hdf5(dump_path, targetfontname, images, chars, compression=compression)
# self.logger.info("[{:3d}/{:3d}] {} has {} valid chars and {} images...".format(
# i, n_fonts, font_name, len(images)))
def main(language, fonts_dir, meta_path, dump_dir):
"""
Args:
language: kor / thai
fonts_dir: font directory that has ttf files
meta_path: meta file path
dump_dir: dataset dir
"""
fonts_dir = Path(fonts_dir)
meta = json.load(open(meta_path, encoding="utf-8"))
allfonts = set(meta['train']['fonts'] + meta['valid']['fonts'])
fonts = [
str(fname) for fname in fonts_dir.glob("*") if fname.name in allfonts
]
assert len(allfonts) == len(fonts)
processor = UserFontProcessor(language)
processor.dump_fonts(fonts, dump_dir)
if __name__ == '__main__':
fire.Fire(main)
|
StarcoderdataPython
|
11272297
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ************************************
# @Time : 2019/7/20 15:59
# @Author : <NAME>
# @Lab : nesa.zju.edu.cn
# @File : cfg_config.py
# ************************************
import argparse
parser = argparse.ArgumentParser(description="Multi-Level Graph Matching Network for the Graph-Graph Classification tasks")
parser.add_argument('--data_dir', type=str, default='../data/CFG', help='root directory for the data set')
parser.add_argument('--dataset', type=str, default="ffmpeg", help='indicate the specific data set (ffmpeg/OpenSSL)')
parser.add_argument('--graph_size_min', type=int, default=3, help='min node size for one graph ')
parser.add_argument('--graph_size_max', type=int, default=200, help='max node size for one graph ')
parser.add_argument('--graph_init_dim', type=int, default=6, help='init feature dimension for one graph')
parser.add_argument("--task", type=str, default='classification', help="classification/regression")
parser.add_argument("--filters", type=str, default='100_100_100', help="filters (neurons) for graph neural networks")
parser.add_argument("--conv", type=str, default='gcn', help="one kind of graph neural networks")
parser.add_argument("--match", type=str, default='node-graph', help="indicating the matching method")
parser.add_argument("--perspectives", type=int, default=100, help='number of perspectives for node-graph matching')
parser.add_argument("--match_agg", type=str, default='bilstm', help="aggregator")
parser.add_argument("--hidden_size", type=int, default=100, help='hidden size for the graph-level embedding')
# global-level information
parser.add_argument("--global_flag", type=lambda x: (str(x).lower() == 'true'), default='True', help="Whether use global info ")
parser.add_argument("--global_agg", type=str, default='fc_max_pool', help="aggregation function for global level gcn ")
# training parameters for classification tasks
parser.add_argument('--epochs', type=int, default=100, help='number of training epochs')
parser.add_argument("--batch_size", type=int, default=5, help="Number of graph pairs per batch.")
parser.add_argument("--lr", type=float, default=0.5e-3, help="Learning rate.")
parser.add_argument("--dropout", type=float, default=0.1, help="Dropout probability.")
# others
parser.add_argument('--gpu_index', type=str, default='1', help="gpu index to use")
parser.add_argument('--log_path', type=str, default='../CFGLogs/', help='path for log file')
parser.add_argument('--repeat_run', type=int, default=1, help='indicated the index of repeat run')
# only test
parser.add_argument('--only_test', type=lambda x: (str(x).lower() == 'true'), default='false')
parser.add_argument('--model_path', type=str, default='.')
cfg_args = parser.parse_args()
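# Usage sketch (assumption, not part of the original file): training scripts import the
# parsed namespace, e.g.
#   from cfg_config import cfg_args
#   print(cfg_args.dataset, cfg_args.conv, cfg_args.filters)   # -> ffmpeg gcn 100_100_100 (defaults)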
|
StarcoderdataPython
|
9745468
|
<filename>funcx_container_service/build.py
import os
import json
import asyncio
import tarfile
import tempfile
import docker
import boto3
from pathlib import Path
from datetime import datetime
from docker.errors import ImageNotFound
from fastapi import HTTPException
from . import database, landlord
from .models import ContainerSpec, ContainerState
REPO2DOCKER_CMD = 'jupyter-repo2docker --no-run --image-name {} {}'
SINGULARITY_CMD = 'singularity build --force {} docker-daemon://{}:latest'
DOCKER_BASE_URL = 'unix://var/run/docker.sock'
def s3_connection():
return boto3.client('s3')
def ecr_connection():
return boto3.client('ecr')
def s3_upload(s3, filename, bucket, key):
s3.upload_file(filename, bucket, key)
def s3_check(db, s3, bucket, container_id):
try:
s3.head_object(Bucket=bucket, Key=container_id)
except s3.exceptions.NoSuchKey:
return False
return True
def ecr_check(db, ecr, container_id):
try:
resp = ecr.list_images(repositoryName=container_id)
return len(resp['imageIds']) > 0
except ecr.exceptions.RepositoryNotFoundException:
return False
return True
def docker_name(container_id):
# XXX need to add repo info here
return f'funcx_{container_id}'
def docker_size(container_id):
docker_client = docker.APIClient(base_url=DOCKER_BASE_URL)
try:
inspect = docker_client.inspect_image(docker_name(container_id))
return inspect['VirtualSize']
except ImageNotFound:
return None
def env_from_spec(spec):
out = {
"name": "funcx-container",
"channels": ["conda-forge"],
"dependencies": ["pip"]
}
if spec.conda:
out["dependencies"] += list(spec.conda)
if spec.pip:
out["dependencies"].append({"pip": list(spec.pip)})
return out
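# Worked example (illustrative only; the spec values below are assumptions): for a
# ContainerSpec with conda=['numpy'] and pip=['requests'], env_from_spec returns
#   {"name": "funcx-container", "channels": ["conda-forge"],
#    "dependencies": ["pip", "numpy", {"pip": ["requests"]}]}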
async def build_spec(s3, container_id, spec, tmp_dir):
if spec.apt:
with (tmp_dir / 'apt.txt').open('w') as f:
f.writelines([x + '\n' for x in spec.apt])
with (tmp_dir / 'environment.yml').open('w') as f:
json.dump(env_from_spec(spec), f, indent=4)
return await repo2docker_build(s3, container_id, tmp_dir)
async def build_tarball(s3, container_id, tarball, tmp_dir):
with tarfile.open(tarball) as tar_obj:
await asyncio.to_thread(tar_obj.extractall, path=tmp_dir)
# For some reason literally any file will pass through this tarfile check
if len(os.listdir(tmp_dir)) == 0:
raise HTTPException(status_code=415, detail="Invalid tarball")
return await repo2docker_build(s3, container_id, tmp_dir)
async def repo2docker_build(s3, container_id, temp_dir):
with tempfile.NamedTemporaryFile() as out:
proc = await asyncio.create_subprocess_shell(
REPO2DOCKER_CMD.format(docker_name(container_id), temp_dir),
stdout=out, stderr=out)
await proc.communicate()
out.flush()
out.seek(0)
await asyncio.to_thread(
s3_upload, s3, out.name, 'docker-logs', container_id)
if proc.returncode != 0:
return None
return docker_size(container_id)
async def singularity_build(s3, container_id):
with tempfile.NamedTemporaryFile() as sif, \
tempfile.NamedTemporaryFile() as out:
proc = await asyncio.create_subprocess_shell(
SINGULARITY_CMD.format(sif.name, docker_name(container_id)),
stdout=out, stderr=out)
await proc.communicate()
await asyncio.to_thread(
s3_upload, s3, out.name, 'singularity-logs', container_id)
if proc.returncode != 0:
return None
container_size = os.stat(sif.name).st_size
if container_size > 0:
await asyncio.to_thread(
s3_upload, s3, sif.name, 'singularity', container_id)
else:
container_size = None
return container_size
async def docker_build(s3, container, tarball):
with tempfile.TemporaryDirectory() as tmp:
tmp = Path(tmp)
if container.specification:
container_size = await build_spec(
s3,
container.id,
ContainerSpec.parse_raw(container.specification),
tmp)
else:
if not tarball:
download = tempfile.NamedTemporaryFile()
tarball = download.name
await asyncio.to_thread(
s3.download_file, 'repos', container.id, tarball)
container_size = await build_tarball(
s3,
container.id,
tarball,
tmp)
# just to be safe
os.unlink(tarball)
return container_size
async def make_s3_url(db, s3, bucket, build_id, is_container=True):
for row in db.query(database.Build).filter(database.Build.id == build_id):
container = row.container
break
else:
raise HTTPException(status_code=404)
if not s3_check(db, s3, bucket, container.id):
return None
url = s3.generate_presigned_url(
'get_object',
Params={'Bucket': bucket, 'Key': container.id})
return url
async def make_s3_container_url(db, s3, bucket, build_id):
for row in db.query(database.Build).filter(database.Build.id == build_id):
container = row.container
break
else:
raise HTTPException(status_code=404)
if container.state == ContainerState.failed:
raise HTTPException(status_code=410)
elif container.state != ContainerState.ready:
alt = await landlord.find_existing(
db, ContainerSpec.parse_raw(container.specification))
if alt:
container = alt
else:
return container.id, None
if not s3_check(db, s3, bucket, container.id):
await remove(db, container.id)
return container.id, None
container.last_used = datetime.now()
url = s3.generate_presigned_url(
'get_object',
Params={'Bucket': bucket, 'Key': container.id})
return container.id, url
async def make_ecr_url(db, ecr, build_id):
for row in db.query(database.Build).filter(database.Build.id == build_id):
container = row.container
break
else:
raise HTTPException(status_code=404)
if container.state == ContainerState.failed:
raise HTTPException(status_code=410)
elif container.state != ContainerState.ready:
alt = await landlord.find_existing(
db, ContainerSpec.parse_raw(container.specification))
if alt:
container = alt
else:
return container.id, None
if not ecr_check(db, ecr, container.id):
await remove(db, container.id)
return container.id, None
container.last_used = datetime.now()
return container.id, docker_name(container.id)
async def background_build(container_id, tarball):
with database.session_scope() as db:
if not await database.start_build(db, container_id):
return
container = db.query(database.Container).filter(
database.Container.id == container_id).one()
s3 = s3_connection()
docker_client = docker.APIClient(base_url=DOCKER_BASE_URL)
try:
container.docker_size = await docker_build(s3, container, tarball)
if container.docker_size is None:
container.state = ContainerState.failed
return
container.singularity_size = await singularity_build(
s3, container_id)
if container.singularity_size is None:
container.state = ContainerState.failed
return
await asyncio.to_thread(docker_client.push,
docker_name(container_id))
container.state = ContainerState.ready
finally:
container.builder = None
await landlord.cleanup(db)
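# Reset a container record back to pending and delete every stored artifact for
# it: the Singularity image and logs in S3, the Docker build logs, and the ECR
# repository (ignored if it no longer exists).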
async def remove(db, container_id):
container = db.query(database.Container).filter(
database.Container.id == container_id).one()
container.state = ContainerState.pending
container.builder = None
container.docker_size = None
container.singularity_size = None
s3 = s3_connection()
ecr = ecr_connection()
    # boto3 client methods accept keyword arguments only, so pass Bucket/Key as
    # keywords through asyncio.to_thread instead of a positional dict.
    await asyncio.to_thread(s3.delete_object,
                            Bucket='singularity', Key=container_id)
    await asyncio.to_thread(s3.delete_object,
                            Bucket='singularity-logs', Key=container_id)
    await asyncio.to_thread(s3.delete_object,
                            Bucket='docker-logs', Key=container_id)
try:
await asyncio.to_thread(ecr.delete_repository,
repositoryName=container_id, force=True)
except ecr.exceptions.RepositoryNotFoundException:
pass
|
StarcoderdataPython
|
197734
|
<reponame>warrench/qiskit-metal
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""QRenderer base class."""
import logging
import inspect
from copy import deepcopy
from typing import TYPE_CHECKING
from typing import List, Tuple, Union, Any, Iterable
from typing import Dict as Dict_
from qiskit_metal.designs import is_design
from qiskit_metal.qgeometries import QGeometryTables
from ... import Dict
__all__ = ['QRenderer']
if TYPE_CHECKING:
    # For linting and type checking, import modules that can't be loaded here under
    # normal conditions. For example, we can't import QDesign, because it requires
    # QRenderer first (the chicken-and-egg issue).
from qiskit_metal.designs import QDesign
class QRenderer():
"""Abstract base class for all Renderers of Metal designs and their
components and qgeometry.
Handles:
::
designs
components
qgeometry
paths
polys
chips
"""
name = 'base' # overwrite this!
"""Name"""
__loaded_renderers__ = set()
__instantiated_renderers__ = dict()
# overwrite this to add element extensions: see ELEMENT_COLUMNS
    # should be dict of dict with keys as element type, which contain (name, dtype) pairs
# e.g. element_extensions = dict(
# base=dict(color=str, klayer=int),
# path=dict(thickness=float, material=str, perfectE=bool),
# poly=dict(thickness=float, material=str), )
element_extensions = dict()
"""Element extensions dictionary"""
# TODO: To add: default parameters for the renderer for component element values.
element_table_data = dict()
@classmethod
def load(cls):
"""Load the renderer and register all its extensions. Only performed
once.
Once complete, the renderer is added to the class attribute
'__loaded_renderers__' of QRenderer
Returns:
bool: True if success, otherwise throws an error.
"""
# Check name
name = cls.name
if name in QRenderer.__loaded_renderers__:
pass
# print(f'Warning: Renderer name={name}, class={cls} already loaded. Doing nothing.')
cls.populate_element_extensions()
# Add element extensions
# see docstring for QRenderer.element_extensions
QGeometryTables.add_renderer_extension(cls.name, cls.element_extensions)
# Moved to init for each renderer.
# Add component extensions
# to be used in the creation of default params for component qgeometry
#raise NotImplementedError()
# Finish and register officially as ready to use.
QRenderer.__loaded_renderers__.add(name)
# Reset the table for the next QRenderer.
for table in cls.element_table_data.keys():
cls.element_extensions.pop(table, None)
return True
@classmethod
def populate_element_extensions(cls):
"""Populate cls.element_extensions which will be used to create columns
for tables in QGeometry tables.
The structure of cls.element_table_data should be same as
cls.element_extensions.
"""
for table, a_dict in cls.element_table_data.items():
cls.element_extensions[table] = dict()
for col_name, col_value in a_dict.items():
# type will only tell out about the base class, won't tell you about the inheritance.
cls.element_extensions[table][col_name] = type(col_value)
@staticmethod
def get_renderer(name: str):
"""Returns an already loaded and instantiated renderer.
Args:
name (str): rendering name
Returns:
QRenderer: Renderer with the given name
"""
        if name not in QRenderer.__loaded_renderers__:
            print(
                f'ERROR: The renderer {name} has not yet been loaded. Please use the load function!'
            )
        if name not in QRenderer.__instantiated_renderers__:
            print(
                f'ERROR: The renderer {name} has not yet been instantiated. Please instantiate the class!'
            )
return QRenderer.__instantiated_renderers__[name]
def __init__(self,
design: 'QDesign',
initiate=True,
render_template: Dict = None,
render_options: Dict = None):
"""
Args:
design (QDesign): The design
initiate (bool): True to initiate the renderer. Defaults to True.
render_template (Dict, optional): Typically used by GUI for template options for GDS. Defaults to None.
render_options (Dict, optional): Used to override all options. Defaults to None.
"""
# TODO: check that the renderer has been loaded with load_renderer
self.status = 'Not Init'
        assert is_design(design), (
            "Error: for the design argument you must provide a child instance "
            "of the Metal QDesign class.")
self._design = design
self.initiated = False
if initiate:
self.initate()
# Register as an instantiated renderer.
QRenderer.__instantiated_renderers__[self.name] = self
# Options
self._options = Dict()
self.update_options(render_options=render_options,
render_template=render_template)
self.status = 'Init Completed'
@property
def options(self) -> Dict:
"""Options for the QRenderer."""
return self._options
@property
def design(self) -> 'QDesign':
"""Return a reference to the parent design object."""
return self._design
@property
def logger(self) -> logging.Logger:
"""Returns the logger."""
return self._design.logger
@classmethod
def _gather_all_children_default_options(cls) -> Dict:
"""From the base class of QRenderer, traverse the child classes to
gather the .default_options for each child class.
        Note: If keys are the same for a child and grandchild, the grandchild's
        value will overwrite the child's.
Returns:
Dict: Options from all children.
"""
options_from_children = Dict()
parents = inspect.getmro(cls)
# QRenderer is not expected to have default_options dict to add to QRenderer class.
for child in parents[len(parents) - 2::-1]:
# There is a developer agreement so the defaults for a renderer will be in a dict named default_options.
if hasattr(child, 'default_options'):
options_from_children = {
**options_from_children,
**child.default_options
}
return options_from_children
@classmethod
def _get_unique_class_name(cls) -> str:
"""Returns unique class name based on the module.
Returns:
str: Example: 'qiskit_metal.renders.renderer_gds.gds_renderer.QGDSRenderer'
"""
return f'{cls.__module__}.{cls.__name__}'
@classmethod
def _register_class_with_design(cls, design: 'QDesign', template_key: str,
render_template: Dict):
"""Init function to register a renderer class with the design when
first instantiated. Registers the renderer's template options.
Args:
design (QDesign): The parent design
template_key (str): Key to use
render_template (dict): template of render to copy
"""
# do not overwrite
if template_key not in design.template_options:
if not render_template:
render_template = cls._gather_all_children_default_options()
design.template_options[template_key] = deepcopy(render_template)
@classmethod
def get_template_options(cls,
design: 'QDesign',
render_template: Dict = None,
logger_: logging.Logger = None,
template_key: str = None) -> Dict:
"""Creates template options for the Metal QRenderer class required for
the class to function, based on the design template; i.e., be created,
made, and rendered. Provides the blank option structure required.
The options can be extended by plugins, such as renderers.
Args:
design (QDesign): A design class.
render_template (Dict, optional): Template options to overwrite the class ones. Defaults to None.
logger_ (logging.Logger, optional): A logger for errors. Defaults to None.
template_key (str, optional): The design.template_options key identifier. If None, then use
_get_unique_class_name(). Defaults to None.
Returns:
Dict: Dictionary of renderer's default options based on design.template_options.
"""
# get key for templates
if template_key is None:
template_key = cls._get_unique_class_name()
if template_key not in design.template_options:
# Registers the renderer's template options.
cls._register_class_with_design(design, template_key,
render_template)
# Only log warning, if template_key not registered within design.
if template_key not in design.template_options:
logger_ = logger_ or design.logger
if logger_:
logger_.error(
f'ERROR in creating renderer {cls.__name__}!\nThe default '
f'options for the renderer class {cls.__name__} are missing'
)
# Specific object render template options
options = deepcopy(Dict(design.template_options[template_key]))
return options
def parse_value(self, value: Union[Any, List, Dict, Iterable]) -> Any:
"""Same as design.parse_value. See design for help.
Returns:
object: Parsed value of input.
"""
return self.design.parse_value(value)
def update_options(self,
render_options: Dict = None,
render_template: Dict = None):
"""If template options has not been set for this renderer, then gather
all the default options for children and add to design. The GUI would
use this to store the template options.
Then give the template options to render
to store in self.options. Then user can over-ride the render_options.
Args:
render_options (Dict, optional): If user wants to over-ride the template
options. Defaults to None.
render_template (Dict, optional): All the template options for each child.
Defaults to None.
"""
self.options.update(
self.get_template_options(self.design,
render_template=render_template))
if render_options:
self.options.update(render_options)
def add_table_data_to_QDesign(self, class_name: str):
"""During init of renderer, this needs to happen. In particular, each
renderer needs to update custom columns and values within QDesign.
Args:
class_name (str): Name from cls.name for each renderer.
"""
status = set()
if not isinstance(QRenderer.name, str):
            self.logger.warning(
                f'In add_table_data_to_QDesign, cls.name={QRenderer.name} is not a str.'
            )
return
for table, a_dict in self.element_table_data.items():
for col_name, col_value in a_dict.items():
status = self.design.add_default_data_for_qgeometry_tables(
table, class_name, col_name, col_value)
if 5 not in status:
self.logger.warning(
f'col_value={col_value} not added to QDesign')
def initate(self, re_initiate=False):
"""Call any initiations steps required to be performed a single time
before rendering, such as connecting to some API or COM, or importing
the correct material libraries, etc.
        Overwrite `_initate_renderer`.
Args:
re_initiate (bool) : If False will only apply this function once.
If True, will re-apply. Defaults to False.
Returns:
bool: was a re_initiation applied or not
"""
if not re_initiate:
if self.initiated:
return False
self.initiated = True
self._initate_renderer()
return True
def get_unique_component_ids(
self,
highlight_qcomponents: Union[list,
None] = None) -> Tuple[list, int]:
"""Confirm the list doesn't have names of components repeated. Confirm
that the name of component exists in QDesign. If QDesign doesn't
contain any component, or if all components in QDesign are found in
highlight_qcomponents, return an empty list; otherwise return a list of
unique components to be sent to the renderer. The second returned item, an
integer, specifies which of these 3 cases applies.
Args:
highlight_qcomponents (Union[list, None], optional): Components to render. Defaults to None.
Returns:
Tuple[list, int]: Empty or partial list of components in QDesign.
"""
highlight_qcomponents = highlight_qcomponents if highlight_qcomponents else []
unique_qcomponents = set(highlight_qcomponents)
for qcomp in unique_qcomponents:
if qcomp not in self.design.name_to_id:
self.logger.warning(
f'The component={qcomp} in highlight_qcomponents not'
' in QDesign.')
return [], 2 # Invalid
if len(unique_qcomponents) in (0, len(self.design.components)):
return [], 1 # Everything selected
return [self.design.name_to_id[elt] for elt in unique_qcomponents
], 0 # Subset selected
def _initate_renderer(self):
"""Call any initiations steps required to be performed a single time
before rendering, such as connecting to some API or COM, or importing
the correct material libraries, etc.
Returns:
bool: Always returns True
"""
return True
def post_render(self):
"""Any calls that one may want to make after a rendering is
complete."""
pass
def render_design(self):
"""Renders all design chips and components."""
self.initate()
self.render_chips()
self.render_components()
# ...
def render_chips(self, all_chips):
"""Render all chips of the design. Calls render_chip for each chip.
Args:
all_chips (list): All chip names to render.
Raises:
NotImplementedError: Function not written yet
"""
# To avoid linting message in a subclass: Method 'render_chips' is
# abstract in class 'QRenderer' but is not overridden,
# have this method do something.
type(all_chips)
raise NotImplementedError()
def render_chip(self, name):
"""Render the given chip.
Args:
name (str): Chip to render.
Raises:
NotImplementedError: Function not written yet
"""
# To avoid linting message in a subclass: Method 'render_chip' is
# abstract in class 'QRenderer' but is not overridden,
# have this method do something.
type(name)
raise NotImplementedError()
def render_components(self, selection=None):
"""Render all components of the design.
If selection is none, then render all components.
Args:
selection (QComponent): Component to render.
Raises:
NotImplementedError: Function not written yet
"""
# To avoid linting message in a subclass: Method 'render_component'
# is abstract in class 'QRenderer' but is not overridden,
# have this method do something.
type(selection)
raise NotImplementedError()
def render_component(self, qcomponent):
"""Render the specified qcomponent.
Args:
qcomponent (QComponent): QComponent to render.
Raises:
NotImplementedError: Function not written yet
"""
# To avoid linting message in a subclass: Method 'render_component'
# is abstract in class 'QRenderer' but is not overridden,
# have this method do something.
type(qcomponent)
raise NotImplementedError()
def render_element(self, element):
"""Render the specified element.
Args:
element (Element): Element to render
Raises:
NotImplementedError: Function not written yet
"""
# To avoid linting message in a subclass: Method 'render_element' is
# abstract in class 'QRenderer' but is not overridden,
# have this method do something.
type(element)
raise NotImplementedError()
# if isinstance(element, path):
# self.render_element_path(element)
# elif isinstance(element, poly):
# self.render_element_poly(element)
# else:
# self.logger.error('RENDERER ERROR: Unknown element {element}')
def render_element_path(self, path):
"""Render an element path.
Args:
path (str): Path to render.
Raises:
NotImplementedError: Function not written yet
"""
# To avoid linting message in a subclass: Method 'render_element_path'
# is abstract in class 'QRenderer' but is not overridden,
# have this method do something.
type(path)
raise NotImplementedError()
def render_element_poly(self, poly):
"""Render an element poly.
Args:
poly (Poly): Poly to render
Raises:
NotImplementedError: Function not written yet
"""
# To avoid linting message in a subclass: Method 'render_element_poly'
# is abstract in class 'QRenderer' but is not overridden
# have this method do something.
type(poly)
raise NotImplementedError()
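# Minimal subclass sketch (illustration only, not part of qiskit-metal): shows
# the two class attributes a concrete renderer usually overrides. The 'sketch'
# name and the junction/width column are assumptions chosen for the example.
class _SketchRenderer(QRenderer):
    """Toy renderer illustrating the QRenderer extension points."""
    name = 'sketch'
    # When _SketchRenderer.load() is called, this adds a 'width' column (default
    # type str, taken from the example value '10um') for the 'junction' table.
    element_table_data = dict(junction=dict(width='10um'))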
|
StarcoderdataPython
|
4880517
|
<filename>scripts/game_handler.py<gh_stars>0
#game_handler.py
global game
global board
def printGame():
global game
global board
wasSeparated=[]
for _ in range(len(board[0])):
wasSeparated.append(False)
# loop over all rows of board
for rownum in range(len(board)):
gamerow=game[rownum]
boardrow=board[rownum]
printstr = ""
topstr = ""
lastval = "0"
        # for every char in the board row, figure out if vertical separation is needed -> insert "|", otherwise a space
for valuenum in range(len(boardrow)):
boardvalue=boardrow[valuenum]
gamevalue=gamerow[valuenum]
lastSeparated=False
if(boardvalue==lastval):
printstr += " "+gamevalue
else:
printstr += "|"+gamevalue
lastSeparated=True
lastval=boardvalue
#generate the row above this row by combining "-" and "+" to fit to grid
if(rownum==0):
if("0"==boardvalue):
if(wasSeparated[valuenum] or lastSeparated):
topstr+="+ "
else:
topstr+=" "
else:
if(wasSeparated[valuenum] or lastSeparated):
topstr+="+—"
else:
topstr+="——"
else:
if(board[rownum-1][valuenum]==boardvalue):
if(wasSeparated[valuenum] or lastSeparated):
topstr+="+ "
else:
topstr+=" "
else:
if(wasSeparated[valuenum] or lastSeparated):
topstr+="+—"
else:
topstr+="——"
wasSeparated[valuenum]=lastSeparated
print(topstr)
print(printstr)
# function for importing .board files
def readBoard(boardfilepath):
global game
global board
board = []
game = []
boardfile = open(boardfilepath,'r')
# loop over rows
for rowdata in boardfile.readlines():
# row for board list
row=[]
# row for game list
gamerow = []
        # append a trailing 0 to make printing easier (cf. printGame); it does not affect the game
rowdata+="0"
# convert chars from file into 2d board list and create same size empty game list
for char in rowdata:
if(not char=='\n'):
row.append(char)
gamerow.append(" ")
# append lists to board and game list
board.append(row)
game.append(gamerow)
lastrow= []
# add 0s for easier printing at the bottom
for _ in range(len(row)):
lastrow.append('0')
board.append(lastrow)
    # use a fresh empty row here; re-appending gamerow would alias the last real
    # row and duplicate its moves in the bottom border when printing
    game.append([" " for _ in range(len(row))])
# function to modify game list to reflect a move made by a player
def makeMove(player,move):
global game
global board
move=(int(move[0]),int(move[1]))
if(game[move[1]][move[0]]==" " and (not board[move[1]][move[0]]=="0")):
if(player=="max"):
game[move[1]][move[0]]="a"
else:
game[move[1]][move[0]]="i"
# undo move -> no need to copy game list every time it is modified
def undoMove(move):
global game
global board
move=(int(move[0]),int(move[1]))
game[move[1]][move[0]]=" "
# generate a list of lists containing the coordinates of the spots belonging to each separate tile
def generateTilesInfo():
global board
tiles = []
knownValues=[]
for rownum in range(len(board)):
row = board[rownum]
for valuenum in range(len(row)):
value=row[valuenum]
if(not value=="0"):
isKnown=False
for testVal in knownValues:
if(testVal==value):
isKnown=True
if(not isKnown):
tile = []
for testrownum in range(len(board)):
testrow = board[testrownum]
for testvaluenum in range(len(testrow)):
testvalue=testrow[testvaluenum]
if(testvalue==value):
tile.append((testvaluenum,testrownum))
tiles.append(tile)
knownValues.append(value)
return tiles
# count points based on tilesInfo retrieved from above function
def countPoints(tilesInfo):
global game
count=0
for tile in tilesInfo:
tilecount=0
for x,y in tile:
x,y = int(x),int(y)
if(game[y][x]=="a"):
tilecount+=1
elif(game[y][x]=="i"):
tilecount-=1
if(tilecount>0):
count+=len(tile)
elif(tilecount<0):
count-=len(tile)
return count
# get all possible moves given the last two moves and the game list
def possibleMoves(secLastX,secLastY,lastX,lastY):
global game
global board
moves = []
secLastX,secLastY,lastX,lastY=int(secLastX),int(secLastY),int(lastX),int(lastY)
for testX in range(len(game[0])):
if(game[lastY][testX]==" "):
if(not board[lastY][testX]=="0"):
if(not board[lastY][lastX]==board[lastY][testX]):
if(not board[secLastY][secLastX]==board[lastY][testX]):
moves.append((testX,lastY))
for testY in range(len(game)):
if(game[testY][lastX]==" "):
if(not board[testY][lastX]=="0"):
if(not board[lastY][lastX]==board[testY][lastX]):
if(not board[secLastY][secLastX]==board[testY][lastX]):
moves.append((lastX,testY))
return moves
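# Minimal usage sketch (not part of the original script). It assumes a board
# file "example.board" exists in the format readBoard() expects: one row per
# line, digits naming tiles and 0 marking unusable spots, and that the chosen
# coordinates lie on that board.
if __name__ == "__main__":
    readBoard("example.board")
    printGame()
    makeMove("max", (1, 1))
    makeMove("min", (2, 1))
    printGame()
    tiles = generateTilesInfo()
    print("current score:", countPoints(tiles))
    print("possible follow-up moves:", possibleMoves(1, 1, 2, 1))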
|
StarcoderdataPython
|
157246
|
<filename>riko/modules/yql.py<gh_stars>0
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
riko.modules.yql
~~~~~~~~~~~~~~~~
Provides functions for fetching the result of a
[YQL](http://developer.yahoo.com/yql) query.
YQL exposes a SQL-like SELECT syntax that is both familiar to developers and
expressive enough for getting the right data. To use YQL, simply enter a YQL
statement, e.g., `select * from feed where url='http://digg.com/rss/index.xml'`.
To drill down further into the result set you can use either the sub-element
module or projection in a YQL statement. For example:
`select title from feed where url='http://digg.com/rss/index.xml'` returns only
the titles from the Digg RSS feed.
The YQL module has 2 viewing modes: Results only or Diagnostics and results.
Diagnostics provides additional data such as: count, language type and more.
A more complex query that finds Flickr photos tagged "fog" in San Francisco:
select * from flickr.photos.info where photo_id in (
select id from flickr.photos.search where woe_id in (
select woeid from geo.places where text="san francisco, ca")
and tags = "fog")
Examples:
basic usage::
>>> from riko import get_path
>>> from riko.utils import fetch, get_abspath
>>> from riko.modules.yql import pipe
>>>
>>> feed = 'http://feeds.feedburner.com/TechCrunch/'
>>> conf = {'query': "select * from feed where url='%s'" % feed}
>>> url = get_abspath(get_path('yql.xml'))
>>>
>>> with fetch(url) as f:
... next(pipe(conf=conf, response=f))['title']
'Bring pizza home'
Attributes:
OPTS (dict): The default pipe options
DEFAULTS (dict): The default parser options
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals)
import pygogo as gogo
from builtins import * # noqa pylint: disable=unused-import
from . import processor
from riko.parsers import xml2etree, etree2dict
from riko.utils import fetch
from riko.bado import coroutine, return_value, util, requests as treq
OPTS = {'ftype': 'none'}
# we use the default format of xml since json loses some structure
DEFAULTS = {'url': 'http://query.yahooapis.com/v1/public/yql', 'debug': False}
logger = gogo.Gogo(__name__, monolog=True).logger
@coroutine
def async_parser(_, objconf, skip=False, **kwargs):
""" Asynchronously parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: content)
stream (dict): The original item
Returns:
Deferred: twisted.internet.defer.Deferred stream
Examples:
>>> from six.moves.urllib.request import urlopen
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>> from riko.utils import get_abspath
>>> from meza.fntools import Objectify
>>>
>>> feed = 'http://feeds.feedburner.com/TechCrunch/'
>>> url = 'http://query.yahooapis.com/v1/public/yql'
>>> query = "select * from feed where url='%s'" % feed
>>> f = urlopen(get_abspath(get_path('yql.xml')))
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['title'])
... conf = {'query': query, 'url': url, 'debug': False}
... objconf = Objectify(conf)
... kwargs = {'stream': {}, 'response': f}
... d = async_parser(None, objconf, **kwargs)
... d.addCallbacks(callback, logger.error)
... d.addCallback(lambda _: f.close())
... return d
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
... finally:
... f.close()
Bring pizza home
"""
if skip:
stream = kwargs['stream']
else:
f = kwargs.get('response')
if not f:
params = {'q': objconf.query, 'diagnostics': objconf.debug}
r = yield treq.get(objconf.url, params=params)
f = yield treq.content(r)
tree = yield util.xml2etree(f)
results = next(tree.getElementsByTagName('results'))
stream = map(util.etree2dict, results.childNodes)
return_value(stream)
def parser(_, objconf, skip=False, **kwargs):
""" Parses the pipe content
Args:
_ (None): Ignored
objconf (obj): The pipe configuration (an Objectify instance)
skip (bool): Don't parse the content
kwargs (dict): Keyword arguments
Kwargs:
assign (str): Attribute to assign parsed content (default: content)
stream (dict): The original item
Returns:
Iter[dict]: The stream of items
Examples:
>>> from riko import get_path
>>> from riko.utils import get_abspath
>>> from meza.fntools import Objectify
>>>
>>> feed = 'http://feeds.feedburner.com/TechCrunch/'
>>> url = 'http://query.yahooapis.com/v1/public/yql'
>>> query = "select * from feed where url='%s'" % feed
>>> conf = {'query': query, 'url': url, 'debug': False}
>>> objconf = Objectify(conf)
>>> url = get_abspath(get_path('yql.xml'))
>>>
>>> with fetch(url) as f:
... kwargs = {'stream': {}, 'response': f}
... result = parser(None, objconf, **kwargs)
>>>
>>> next(result)['title']
'Bring pizza home'
"""
if skip:
stream = kwargs['stream']
else:
f = kwargs.get('response')
if not f:
params = {'q': objconf.query, 'diagnostics': objconf.debug}
if objconf.memoize and not objconf.cache_type:
objconf.cache_type = 'auto'
f = fetch(params=params, **objconf)
# TODO: consider paging for large result sets
root = xml2etree(f).getroot()
results = root.find('results')
stream = map(etree2dict, results)
return stream
@processor(DEFAULTS, isasync=True, **OPTS)
def async_pipe(*args, **kwargs):
"""A source that asynchronously fetches the content of a given website as
DOM nodes or a string.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'query'. May
contain the keys 'url' or 'debug'.
url (str): The API to query (default:
'http://query.yahooapis.com/v1/public/yql')
query (str): The API query
debug (bool): Enable diagnostics mode (default: False)
assign (str): Attribute to assign parsed content (default: content)
response (str): The API query response (used for offline testing)
Returns:
dict: twisted.internet.defer.Deferred stream of items
Examples:
>>> from six.moves.urllib.request import urlopen
>>> from riko import get_path
>>> from riko.bado import react
>>> from riko.bado.mock import FakeReactor
>>> from riko.utils import get_abspath
>>>
>>> feed = 'http://feeds.feedburner.com/TechCrunch/'
>>> query = "select * from feed where url='%s'" % feed
>>> f = urlopen(get_abspath(get_path('yql.xml')))
>>>
>>> def run(reactor):
... callback = lambda x: print(next(x)['title'])
... d = async_pipe(conf={'query': query}, response=f)
... d.addCallbacks(callback, logger.error)
... d.addCallback(lambda _: f.close())
... return d
>>>
>>> try:
... react(run, _reactor=FakeReactor())
... except SystemExit:
... pass
... finally:
... f.close()
Bring pizza home
"""
return async_parser(*args, **kwargs)
@processor(DEFAULTS, **OPTS)
def pipe(*args, **kwargs):
"""A source that fetches the result of a given YQL query.
Args:
item (dict): The entry to process
kwargs (dict): The keyword arguments passed to the wrapper
Kwargs:
conf (dict): The pipe configuration. Must contain the key 'query'. May
contain the keys 'url' or 'debug'.
url (str): The API to query (default:
'http://query.yahooapis.com/v1/public/yql')
query (str): The API query
debug (bool): Enable diagnostics mode (default: False)
assign (str): Attribute to assign parsed content (default: content)
response (str): The API query response (used for offline testing)
Yields:
dict: an item of the result
Examples:
>>> from riko import get_path
>>> from riko.utils import get_abspath
>>>
>>> feed = 'http://feeds.feedburner.com/TechCrunch/'
>>> conf = {'query': "select * from feed where url='%s'" % feed}
>>> url = get_abspath(get_path('yql.xml'))
>>>
>>> with fetch(url) as f:
... result = next(pipe(conf=conf, response=f))
... sorted(result.keys())
['alarmTime', 'begin', 'duration', 'place', 'title', 'uid']
>>> result['title']
'Bring pizza home'
"""
return parser(*args, **kwargs)
|
StarcoderdataPython
|
6554636
|
<reponame>cjolowicz/cutty<filename>src/cutty/filesystems/adapters/git.py
"""Git-based filesystem using libgit2."""
import pathlib
from collections.abc import Iterator
import pygit2
from cutty.filesystems.domain.filesystem import Access
from cutty.filesystems.domain.nodefs import FilesystemNode
from cutty.filesystems.domain.nodefs import NodeFilesystem
from cutty.filesystems.domain.purepath import PurePath
class GitFilesystemNode(FilesystemNode):
"""A node in a git filesystem."""
def __init__(self, node: pygit2.Object) -> None:
"""Initialize."""
self.node = node
def is_dir(self) -> bool:
"""Return True if the node is a directory."""
return isinstance(self.node, pygit2.Tree)
def is_file(self) -> bool:
"""Return True if the node is a regular file."""
return (
isinstance(self.node, pygit2.Blob)
and self.node.filemode != pygit2.GIT_FILEMODE_LINK
)
def is_symlink(self) -> bool:
"""Return True if the node is a symbolic link."""
return (
isinstance(self.node, pygit2.Blob)
and self.node.filemode == pygit2.GIT_FILEMODE_LINK
)
def read_bytes(self) -> bytes:
"""Return the file contents."""
data: bytes = self.node.data
return data
def read_text(self) -> str:
"""Return the file contents."""
return self.read_bytes().decode()
def readlink(self) -> PurePath:
"""Return the link target."""
target: str = self.node.data.decode(errors="surrogateescape")
parts = pathlib.PurePosixPath(target).parts
return PurePath(*parts)
def iterdir(self) -> Iterator[str]:
"""Iterate over the directory entries."""
for entry in self.node:
yield entry.name
def __truediv__(self, entry: str) -> FilesystemNode:
"""Return the given directory entry."""
try:
return GitFilesystemNode(self.node / entry)
except KeyError:
raise FileNotFoundError()
def access(self, mode: Access) -> bool:
"""Return True if the user can access the node."""
return (
Access.EXECUTE not in mode
or self.node.filemode == pygit2.GIT_FILEMODE_BLOB_EXECUTABLE
)
class GitFilesystem(NodeFilesystem):
"""Git-based filesystem."""
def __init__(self, repository: pathlib.Path, ref: str = "HEAD") -> None:
"""Inititalize."""
repo = pygit2.Repository(repository)
tree = repo.revparse_single(ref).peel(pygit2.Tree)
self.root = GitFilesystemNode(tree)
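# Minimal usage sketch (not part of the original module): list the entries of a
# repository's HEAD tree using only the classes defined above. The repository
# path is an assumption.
if __name__ == "__main__":
    fs = GitFilesystem(pathlib.Path("/path/to/a/git/repository"))
    for entry_name in fs.root.iterdir():
        print(entry_name)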
|
StarcoderdataPython
|
12829869
|
<filename>slurminade/conf.py
"""
This file saves the default configuration for slurm.
"""
import json
import os.path
from pathlib import Path
def _load_default_conf():
default_conf_file = os.path.join(Path.home(), ".slurminade_default.json")
try:
if os.path.isfile(default_conf_file):
with open(default_conf_file, "r") as f:
return json.load(f)
else:
return {}
except Exception as e:
print(
f"slurminade could not open default configuration {default_conf_file}!\n{str(e)}"
)
return {}
__default_conf = _load_default_conf()
def update_default_configuration(conf=None, **kwargs):
if conf:
__default_conf.update(conf)
if kwargs:
__default_conf.update(kwargs)
def set_default_configuration(conf=None, **kwargs):
    # Clear the module-level defaults in place; rebinding a local name here would
    # leave the previous defaults untouched.
    __default_conf.clear()
update_default_configuration(conf, **kwargs)
def _get_conf(conf=None):
conf = conf if conf else {}
conf_ = __default_conf.copy()
conf_.update(conf)
return conf_
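# Minimal usage sketch (not part of the original module): shows how the helpers
# above compose. The option names ("partition", "time") are illustrative
# assumptions, not keys the module requires.
if __name__ == "__main__":
    update_default_configuration(partition="example-partition")
    merged = _get_conf({"time": "01:00:00"})  # call-specific options override defaults
    print(merged)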
|
StarcoderdataPython
|
3217717
|
<reponame>VisualComputingInstitute/CROWDBOT_perception<gh_stars>1-10
from .coco_stuff_dataset import make_coco_stuff
from .coco_panoptic_dataset import make_coco_panoptic
|
StarcoderdataPython
|
5109431
|
#!/usr/bin/env python3
"""Corpus reader for the Hungarian Webcorpus."""
from __future__ import absolute_import, division, print_function
from contextlib import contextmanager
from html.parser import HTMLParser # Needs future, too
import io
import itertools
import re
import tarfile
from unicodedata import category
from emLam.corpus.corpus_base import RawCorpus
from emLam.utils import openall
class Webcorpus(RawCorpus):
NAME = 'hu_webcorpus'
DESCRIPTION = 'Hungarian Webcorpus'
rename_p = re.compile(r'\.tar(\.gz)$')
def __init__(self, max_lines, compressed=True, max_entities=0.2):
super(Webcorpus, self).__init__(max_lines)
self.compressed = compressed
self.max_entities = max_entities
self.html_parser = HTMLParser()
@contextmanager
def instream(self, input_file):
if self.compressed:
input_stream = self.enumerate_tar
else:
input_stream = self.enumerate_file
inf = itertools.chain.from_iterable(input_stream(input_file))
yield self.__read_sentence(inf)
def outstream(self, output_file):
"""Removes the 'tar' from the name of the output file, if compressed."""
if self.compressed:
output_file = self.rename_p.sub(r'\1', output_file)
return super(Webcorpus, self).outstream(output_file)
# def files_to_streams(self, input_file, output_file):
# """
# Reads input_file according to the corpus format (compressed / not). In
# the former case, modifies the output_file name so that the '.tar' part
# is not included in it.
# """
# if self.compressed:
# output_file = self.rename_p.sub(r'\1', output_file)
# input_stream = self.enumerate_tar
# else:
# input_stream = self.enumerate_file
# with openall(output_file, 'wt', encoding='utf-8') as outf:
# inf = itertools.chain.from_iterable(input_stream(input_file))
# yield self.__read_sentence(inf), outf
def __read_sentence(self, input_stream):
"""Returns a sentence a time, cleaned of HTML entities."""
for line in input_stream:
if line.startswith(u'<s>'):
text = line[3:].strip()
orig_text = text
amps = text.count(u'&')
if amps > 0:
text = self.html_parser.unescape(text)
entities = amps - text.count(u'&')
self.logger.debug(
u'Entities: {}, amps: {}, len: {}; ratio: {} => {}: {} ({})'.format(
entities, amps, len(text), entities / float(len(text)),
entities / float(len(text)) > self.max_entities,
text.strip(), orig_text.strip()).encode('utf-8'))
if entities / float(len(text)) > self.max_entities:
# Skip sentence if too many entities (i.e. foreign script)
continue
clean_text = self.__clean_text(text)
if clean_text != text:
self.logger.debug(u'Filtered text: `{}` -> `{}`'.format(
text, clean_text))
yield clean_text + u'\n'
@staticmethod
def __clean_text(text):
"""Cleans the text of all unicode shenanigans."""
clean_text = []
for c in text.replace(u'\t', u' '):
# Get rid of extended Unicode characters, which are most likely
# there by accident
if ord(c) > 65535:
continue
cat = category(c)
# Control characters are also bugs in the corpus
if cat.startswith('C'):
continue
clean_text.append(c if not cat.startswith('Z') else ' ')
return u''.join(clean_text)
@staticmethod
def enumerate_tar(archive):
if not tarfile.is_tarfile(archive):
return # TODO
with tarfile.open(archive) as tf:
for member in tf.getmembers():
if member.isfile():
member_f = tf.extractfile(member.name)
# This should work, but alas, only from Python 3.3
# yield io.TextIOWrapper(member_f, encoding='iso-8859-2')
yield io.TextIOWrapper(io.BytesIO(member_f.read()), encoding='iso-8859-2')
yield [u'<s>\n'] # To separate files
member_f.close()
@staticmethod
def enumerate_file(corp_file):
with openall(corp_file, encoding='iso-8859-2') as inf:
yield inf
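# Minimal usage sketch (not part of the original module). It assumes that the
# RawCorpus base class accepts max_lines as in __init__ above and that
# "webcorpus_part.tar.gz" is one of the compressed corpus archives.
if __name__ == "__main__":
    corpus = Webcorpus(max_lines=10000, compressed=True)
    with corpus.instream("webcorpus_part.tar.gz") as sentences:
        for sentence in itertools.islice(sentences, 5):
            print(sentence.strip())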
|
StarcoderdataPython
|
8196141
|
# Write a program that reads two numbers.
# Print the result of multiplying the first by the second. Use only the sum and
# subtraction operators to calculate the result.
# Remember that we can understand the multiplication of two numbers as successive
# sums of one of them. So, 4 x 5 = 4 + 4 + 4 + 4 + 4 = 5 + 5 + 5 + 5 = 20
x = int(input("Type the first number: "))
y = int(input("Type the second number: "))
# Compute x * y with additions only (assumes non-negative integers, as in the
# original exercise).
result = 0
for _ in range(y):
    result = result + x
# Show both expansions of the product and the final result.
first_expansion = " + ".join([str(x)] * y) if y > 0 else "0"
second_expansion = " + ".join([str(y)] * x) if x > 0 else "0"
print(f"{x} x {y} = {first_expansion} = {second_expansion} = {result}")
|
StarcoderdataPython
|